From 46689789aaa752d6022ccdab49890d2b45ed64e9 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Thu, 20 Jan 2022 16:14:54 +0000 Subject: [PATCH 01/54] chore(release): 2.0.0-beta.1 [skip ci] # [2.0.0-beta.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v1.4.3...v2.0.0-beta.1) (2022-01-20) ### Bug Fixes * cache node_modules between jobs ([0a9490a](https://github.com/yandex-cloud/nodejs-sdk/commit/0a9490a91fdf201b9a802302849735d3e17686f6)) * correct logging service endpoints ([a712923](https://github.com/yandex-cloud/nodejs-sdk/commit/a71292314fbbd31e6fca6e2931a07db0fd69f647)) * disable husky hooks in CI ([4919d54](https://github.com/yandex-cloud/nodejs-sdk/commit/4919d5488515729ec9fa52dc2317bbc3c2c99b3e)) * eslint rule ([e097134](https://github.com/yandex-cloud/nodejs-sdk/commit/e097134907090e76aebb6b7130bfe0a81609d2ae)) * install long package ([7120858](https://github.com/yandex-cloud/nodejs-sdk/commit/71208586ef1c9163781a84f2ab3e214bd6b61df2)) * move legacy code to separate directory ([b38248c](https://github.com/yandex-cloud/nodejs-sdk/commit/b38248c5a06633366f01f1125e9536088f8e39ae)) * move to axios ([438f7c6](https://github.com/yandex-cloud/nodejs-sdk/commit/438f7c6274f1a71bacb7218961e03b7a3440e002)) * remove prettier ([6bf0f24](https://github.com/yandex-cloud/nodejs-sdk/commit/6bf0f24835f4baaa4d7c36200198d75100ee747a)) * remove unused test module ([4e90a92](https://github.com/yandex-cloud/nodejs-sdk/commit/4e90a9286c6a72456f8a561cfe851df73167e824)) * removed legacy example ([10c9409](https://github.com/yandex-cloud/nodejs-sdk/commit/10c94090f752f39e708b8884aaf9791f7571df7f)) * set public access for npm package ([a68360d](https://github.com/yandex-cloud/nodejs-sdk/commit/a68360dbb8f6a618cede5bfdd6111ace7dff62e6)) * update ts-proto ([a20a650](https://github.com/yandex-cloud/nodejs-sdk/commit/a20a6503cfbf93841f112be0d068a6ffed0b9136)) * use bot's name/email for pushing release commits ([3b2553a](https://github.com/yandex-cloud/nodejs-sdk/commit/3b2553ae557464f9779ec74eb382ab15662fe85a)) * use bot's token for pushing release commits ([76cf6e7](https://github.com/yandex-cloud/nodejs-sdk/commit/76cf6e7deda2822404c21f6ab17ca826d11bfa3f)) * use exact versions of dependencies ([b564178](https://github.com/yandex-cloud/nodejs-sdk/commit/b564178c3e7e680e93a4c3905f09f41eeb14b0a8)) ### Features * added git submodule with yandex cloud api specs ([7916612](https://github.com/yandex-cloud/nodejs-sdk/commit/79166129c9deb43328983d15ad905915b1c4249b)) * allow to override endpoint in service factory ([12e118e](https://github.com/yandex-cloud/nodejs-sdk/commit/12e118e371003bb9082227688a9d6b428e2d650a)) * change package description ([e9ecf16](https://github.com/yandex-cloud/nodejs-sdk/commit/e9ecf16d60448e68e4adbfd89e7d87506cbc684a)) * endpoints list for all available service clients ([78c2355](https://github.com/yandex-cloud/nodejs-sdk/commit/78c235530e00823de2878d59f2b3b86abce12f6b)) * export generic type of wrapped service client ([bec2aca](https://github.com/yandex-cloud/nodejs-sdk/commit/bec2acaa282130238cd2d91ecfebabca0379a0b2)) * generate exports for all entities in cloud api ([5136bb5](https://github.com/yandex-cloud/nodejs-sdk/commit/5136bb5930395760c7f46c50a69c85d681d82ea8)) * operation utilities ([e82e279](https://github.com/yandex-cloud/nodejs-sdk/commit/e82e27990533aeab73a9fb298cff888e5f422e1e)) * regenerate code with new ts-proto ([1e2dcc2](https://github.com/yandex-cloud/nodejs-sdk/commit/1e2dcc2bbb4a7614f23643c3520bd04d702920af)) * remove old generated code 
([a5ecb33](https://github.com/yandex-cloud/nodejs-sdk/commit/a5ecb335acaa7e2dbbcc6beb2b00040cb82182d4)) * remove unused legacy code and dependencies ([3d3a6f2](https://github.com/yandex-cloud/nodejs-sdk/commit/3d3a6f2fea427d08571f6a5c443efed80ea2e0c0)) * restructure directories ([55bf685](https://github.com/yandex-cloud/nodejs-sdk/commit/55bf6857e2b4ce4226129775acf18b7818375560)) * reworked examples ([2875275](https://github.com/yandex-cloud/nodejs-sdk/commit/2875275a44411295469211f948c343123072e7cf)) * rewrite index module in ts ([99e8ba8](https://github.com/yandex-cloud/nodejs-sdk/commit/99e8ba8b913e5943a0125a81df020e50a79760cf)) * session class ([826e6de](https://github.com/yandex-cloud/nodejs-sdk/commit/826e6de7198d4cc00d33084a76454ad645cf6708)) * token services ([8b730df](https://github.com/yandex-cloud/nodejs-sdk/commit/8b730dfbeac5477ef49f0af5b697d1c9cdd0b157)) * tool for code generation ([b463cf8](https://github.com/yandex-cloud/nodejs-sdk/commit/b463cf8e44fcbcddd99c92337c1a9cca665399a1)) ### BREAKING CHANGES * removed some useless classes * changed API of Session * changed API of service clients --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 5a0c9bda..02169661 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-alpha.7", + "version": "2.0.0-beta.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-alpha.7", + "version": "2.0.0-beta.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", diff --git a/package.json b/package.json index f39a2c98..8d41cf1d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-alpha.7", + "version": "2.0.0-beta.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 22ea1005f4fb19e9b4bc9c38724a2f659cdc9d3d Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Wed, 16 Feb 2022 17:41:23 +0300 Subject: [PATCH 02/54] feat: update nice-grpc --- package-lock.json | 98 +++++++++++++++++++++++------------------------ package.json | 4 +- 2 files changed, 51 insertions(+), 51 deletions(-) diff --git a/package-lock.json b/package-lock.json index 55a7fc17..f9bc1ac3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -16,8 +16,8 @@ "log4js": "6.3.0", "long": "5.2.0", "luxon": "2.2.0", - "nice-grpc": "1.0.4", - "nice-grpc-client-middleware-deadline": "1.0.4", + "nice-grpc": "1.0.6", + "nice-grpc-client-middleware-deadline": "1.0.6", "protobufjs": "6.8.8", "utility-types": "3.10.0" }, @@ -1154,9 +1154,9 @@ } }, "node_modules/@grpc/grpc-js": { - "version": "1.4.4", + "version": "1.5.5", "resolved": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-RzhxaO5zpygPvFLKiWu24lb3lYlEdQBeZJAYlEgoB+OaqIZLFDeG/833v+lInxVYvPiNB/GMCfQrhktzsereiw==", + "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", "license": "Apache-2.0", "dependencies": { "@grpc/proto-loader": "^0.6.4", @@ -1166,16 +1166,16 @@ "node": "^8.13.0 || >=10.10.0" } }, - "node_modules/@grpc/grpc-js/node_modules/@grpc/proto-loader": { - "version": "0.6.7", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.7.tgz", - 
"integrity": "sha512-QzTPIyJxU0u+r2qGe8VMl3j/W2ryhEvBv7hc42OjYfthSj370fUrb7na65rG6w3YLZS/fb8p89iTBobfWGDgdw==", + "node_modules/@grpc/proto-loader": { + "version": "0.6.9", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.9.tgz", + "integrity": "sha512-UlcCS8VbsU9d3XTXGiEVFonN7hXk+oMXZtoHHG2oSA1/GcDP1q6OUgs20PzHDGizzyi8ufGSUDlk3O2NyY7leg==", "dependencies": { "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", "long": "^4.0.0", "protobufjs": "^6.10.0", - "yargs": "^16.1.1" + "yargs": "^16.2.0" }, "bin": { "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" @@ -1184,12 +1184,12 @@ "node": ">=6" } }, - "node_modules/@grpc/grpc-js/node_modules/long": { + "node_modules/@grpc/proto-loader/node_modules/long": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, - "node_modules/@grpc/grpc-js/node_modules/protobufjs": { + "node_modules/@grpc/proto-loader/node_modules/protobufjs": { "version": "6.11.2", "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", @@ -1214,7 +1214,7 @@ "pbts": "bin/pbts" } }, - "node_modules/@grpc/grpc-js/node_modules/yargs": { + "node_modules/@grpc/proto-loader/node_modules/yargs": { "version": "16.2.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", @@ -7401,22 +7401,22 @@ "dev": true }, "node_modules/nice-grpc": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.4.tgz", - "integrity": "sha512-/1fAKywTdwHzVxt1Ski6120lx6S++RpGjXp7y0OUTZze4wHrwgC64xuuRTT6COz5BcX+Pch7gTc2m5fz7+M4nA==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.6.tgz", + "integrity": "sha512-cRImN+SpnPaTAqWSbuU5ixq/wo1Jr1QOv0IZjmcb40XNU0og4JEyt7VCtTM7SAbeLAjdFxd65wiIid696kVTJA==", "dependencies": { - "@grpc/grpc-js": "^1.2.6", + "@grpc/grpc-js": "^1.5.1", "abort-controller-x": "^0.2.4", - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^1.2.1" } }, "node_modules/nice-grpc-client-middleware-deadline": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.4.tgz", - "integrity": "sha512-IYLEzWkLI0ij41WVDLBjBJohmlh2cI+2ttMDawK8h7G209vrAndEJ4iiN9gQUqtguVzq4S3e8BzQgJ26hBMQtw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.6.tgz", + "integrity": "sha512-AokugSveg+2IPohuLbGR5OITgh3W4yZvAmLhuqistjwSRLchzQI4CwQEL1Tj4R0wscreSFoiHkXyG4qtKygOug==", "dependencies": { - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^2.0.0" } }, @@ -7426,9 +7426,9 @@ "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" }, "node_modules/nice-grpc-common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.3.tgz", - "integrity": "sha512-bFETAyaUxcPgcNL6ZW+aOxzrBig9t/3I6ikKBw5dpxzthd9gfNPOG3W8+KPbIMxnHi6ANmbj57wwPoOI6m0qNg==", + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.4.tgz", + "integrity": "sha512-cpKGONNYqi2XP+5z4B4bzhLNrJu5lPbIScM0sqsht6sG9TgdN7ws3qCH82Fht94CfOifL6pQlvkgnEJp5nl2cQ==", "dependencies": { "node-abort-controller": "^2.0.0" } @@ -13645,24 +13645,24 @@ }, "@grpc/grpc-js": { "version": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-RzhxaO5zpygPvFLKiWu24lb3lYlEdQBeZJAYlEgoB+OaqIZLFDeG/833v+lInxVYvPiNB/GMCfQrhktzsereiw==", + "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", "requires": { "@grpc/proto-loader": "^0.6.4", "@types/node": ">=12.12.47" + } + }, + "@grpc/proto-loader": { + "version": "0.6.9", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.9.tgz", + "integrity": "sha512-UlcCS8VbsU9d3XTXGiEVFonN7hXk+oMXZtoHHG2oSA1/GcDP1q6OUgs20PzHDGizzyi8ufGSUDlk3O2NyY7leg==", + "requires": { + "@types/long": "^4.0.1", + "lodash.camelcase": "^4.3.0", + "long": "^4.0.0", + "protobufjs": "^6.10.0", + "yargs": "^16.2.0" }, "dependencies": { - "@grpc/proto-loader": { - "version": "0.6.7", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.7.tgz", - "integrity": "sha512-QzTPIyJxU0u+r2qGe8VMl3j/W2ryhEvBv7hc42OjYfthSj370fUrb7na65rG6w3YLZS/fb8p89iTBobfWGDgdw==", - "requires": { - "@types/long": "^4.0.1", - "lodash.camelcase": "^4.3.0", - "long": "^4.0.0", - "protobufjs": "^6.10.0", - "yargs": "^16.1.1" - } - }, "long": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", @@ -18525,22 +18525,22 @@ "dev": true }, "nice-grpc": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.4.tgz", - "integrity": "sha512-/1fAKywTdwHzVxt1Ski6120lx6S++RpGjXp7y0OUTZze4wHrwgC64xuuRTT6COz5BcX+Pch7gTc2m5fz7+M4nA==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.6.tgz", + "integrity": "sha512-cRImN+SpnPaTAqWSbuU5ixq/wo1Jr1QOv0IZjmcb40XNU0og4JEyt7VCtTM7SAbeLAjdFxd65wiIid696kVTJA==", "requires": { - "@grpc/grpc-js": "^1.2.6", + "@grpc/grpc-js": "^1.5.1", "abort-controller-x": "^0.2.4", - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^1.2.1" } }, "nice-grpc-client-middleware-deadline": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.4.tgz", - "integrity": "sha512-IYLEzWkLI0ij41WVDLBjBJohmlh2cI+2ttMDawK8h7G209vrAndEJ4iiN9gQUqtguVzq4S3e8BzQgJ26hBMQtw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.6.tgz", + "integrity": "sha512-AokugSveg+2IPohuLbGR5OITgh3W4yZvAmLhuqistjwSRLchzQI4CwQEL1Tj4R0wscreSFoiHkXyG4qtKygOug==", "requires": { - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^2.0.0" }, "dependencies": { @@ -18552,9 +18552,9 @@ } }, "nice-grpc-common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.3.tgz", - "integrity": "sha512-bFETAyaUxcPgcNL6ZW+aOxzrBig9t/3I6ikKBw5dpxzthd9gfNPOG3W8+KPbIMxnHi6ANmbj57wwPoOI6m0qNg==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.4.tgz", + "integrity": "sha512-cpKGONNYqi2XP+5z4B4bzhLNrJu5lPbIScM0sqsht6sG9TgdN7ws3qCH82Fht94CfOifL6pQlvkgnEJp5nl2cQ==", "requires": { 
"node-abort-controller": "^2.0.0" }, diff --git a/package.json b/package.json index 8856fc5d..9f151495 100644 --- a/package.json +++ b/package.json @@ -29,8 +29,8 @@ "log4js": "6.3.0", "long": "5.2.0", "luxon": "2.2.0", - "nice-grpc": "1.0.4", - "nice-grpc-client-middleware-deadline": "1.0.4", + "nice-grpc": "1.0.6", + "nice-grpc-client-middleware-deadline": "1.0.6", "protobufjs": "6.8.8", "utility-types": "3.10.0" }, From 19129f329cd33583e02bdaead83440d392e44e20 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Wed, 16 Feb 2022 17:48:59 +0300 Subject: [PATCH 03/54] fix: use serviceName property of client ctor --- src/service-endpoints.test.ts | 4 ++-- src/service-endpoints.ts | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/service-endpoints.test.ts b/src/service-endpoints.test.ts index d3f485c4..e40b369a 100644 --- a/src/service-endpoints.test.ts +++ b/src/service-endpoints.test.ts @@ -21,13 +21,13 @@ describe('service endpoints', () => { const serviceName = 'myCustomService'; expect(() => { - getServiceClientEndpoint({ options: { serviceName } } as unknown as MockServiceClientCtor); + getServiceClientEndpoint({ serviceName } as unknown as MockServiceClientCtor); }).toThrow(`Endpoint for service ${serviceName} is no defined`); }); it('should throw exception if client class has no serviceName option', () => { expect(() => { - getServiceClientEndpoint({ options: {} } as unknown as MockServiceClientCtor); + getServiceClientEndpoint({} as unknown as MockServiceClientCtor); }).toThrow('Unable to retrieve serviceName of provided service client class'); }); }); diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index f9d3e010..726a8726 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -317,8 +317,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ export const getServiceClientEndpoint = (generatedClientCtor: GeneratedServiceClientCtor): string => { const clientCtor = generatedClientCtor as unknown as ServiceClientConstructor; - // eslint-disable-next-line prefer-destructuring - const serviceName: string = clientCtor.options.serviceName as string; + const serviceName: string = clientCtor.serviceName as string; if (!serviceName) { throw new Error('Unable to retrieve serviceName of provided service client class'); From 891b4b74f4d0b95e1a862e0f27698cccb003cc10 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Wed, 16 Feb 2022 14:55:46 +0000 Subject: [PATCH 04/54] chore(release): 2.0.0-beta.2 [skip ci] # [2.0.0-beta.2](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.0.0-beta.1...v2.0.0-beta.2) (2022-02-16) ### Bug Fixes * use serviceName property of client ctor ([19129f3](https://github.com/yandex-cloud/nodejs-sdk/commit/19129f329cd33583e02bdaead83440d392e44e20)) ### Features * update nice-grpc ([22ea100](https://github.com/yandex-cloud/nodejs-sdk/commit/22ea1005f4fb19e9b4bc9c38724a2f659cdc9d3d)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 643d26d2..9d4b6334 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.1", + "version": "2.0.0-beta.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.1", + "version": "2.0.0-beta.2", "license": "MIT", "dependencies": { "@grpc/grpc-js": 
"https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", diff --git a/package.json b/package.json index 15c88c40..ce69d1f4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.1", + "version": "2.0.0-beta.2", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 34d259223902f8c1d9a1e5f5cf4440651059f666 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Tue, 8 Feb 2022 23:11:46 +0300 Subject: [PATCH 05/54] docs(examples): add example of using the SDK with streaming endpoint --- examples/package-lock.json | 242 ++++++++++++++++++++++++++++++++++- examples/package.json | 6 +- examples/stream-stt/index.ts | 62 +++++++++ examples/stream-stt/test.wav | Bin 0 -> 551682 bytes examples/utils/logger.ts | 3 +- 5 files changed, 304 insertions(+), 9 deletions(-) create mode 100644 examples/stream-stt/index.ts create mode 100644 examples/stream-stt/test.wav diff --git a/examples/package-lock.json b/examples/package-lock.json index d63c95f9..c22c7532 100644 --- a/examples/package-lock.json +++ b/examples/package-lock.json @@ -9,7 +9,11 @@ "version": "1.0.0", "license": "ISC", "dependencies": { - "@yandex-cloud/nodejs-sdk": "^2.0.0-alpha.7" + "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.1", + "wav": "^1.0.2" + }, + "devDependencies": { + "@types/wav": "^1.0.1" } }, "..": { @@ -162,10 +166,19 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz", "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==" }, + "node_modules/@types/wav": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/wav/-/wav-1.0.1.tgz", + "integrity": "sha512-AKJeM5mqO1pdR2/HaTUQzSCm12No36KUM1larivXUmsLx+4JmMuC2Tv0kCdZzTx66h7IH2Xr92DGc9NQsXxa9Q==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@yandex-cloud/nodejs-sdk": { - "version": "2.0.0-alpha.7", - "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-alpha.7.tgz", - "integrity": "sha512-vQkmav7Ke+J24GjyaDhh9wl0/jI5C9y0gwbOT0zVu29GfK8y9BwopcSt8NOM9BGBfVLQwYSMqmTcByPcPx0slQ==", + "version": "2.0.0-beta.1", + "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.1.tgz", + "integrity": "sha512-1nAtsNaCwefM7BP9GI2144aCsGdhQBxVzxZR13N6+e66A1uJH/nxJpHDXM1nB61jveOmDZUH8/JOyjGVLOM3qw==", "dependencies": { "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", "axios": "0.24.0", @@ -256,11 +269,35 @@ "follow-redirects": "^1.14.4" } }, + "node_modules/buffer-alloc": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "dependencies": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "node_modules/buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + }, "node_modules/buffer-equal-constant-time": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" }, + "node_modules/buffer-fill": { + "version": 
"1.0.0", + "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, "node_modules/cliui": { "version": "7.0.4", "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", @@ -287,6 +324,11 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, "node_modules/date-format": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz", @@ -387,6 +429,11 @@ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -395,6 +442,11 @@ "node": ">=8" } }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, "node_modules/jsonfile": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", @@ -594,6 +646,17 @@ "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, + "node_modules/readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -634,6 +697,27 @@ "semver": "bin/semver" } }, + "node_modules/stream-parser": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz", + "integrity": "sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=", + "dependencies": { + "debug": "2" + } + }, + "node_modules/stream-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/stream-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, "node_modules/streamroller": { "version": 
"2.2.4", "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz", @@ -655,6 +739,11 @@ "node": ">=4.0" } }, + "node_modules/string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + }, "node_modules/string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", @@ -695,6 +784,31 @@ "node": ">= 4" } }, + "node_modules/wav": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wav/-/wav-1.0.2.tgz", + "integrity": "sha512-viHtz3cDd/Tcr/HbNqzQCofKdF6kWUymH9LGDdskfWFoIy/HJ+RTihgjEcHfnsy1PO4e9B+y4HwgTwMrByquhg==", + "dependencies": { + "buffer-alloc": "^1.1.0", + "buffer-from": "^1.0.0", + "debug": "^2.2.0", + "readable-stream": "^1.1.14", + "stream-parser": "^0.3.1" + } + }, + "node_modules/wav/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/wav/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, "node_modules/wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", @@ -837,10 +951,19 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz", "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==" }, + "@types/wav": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/wav/-/wav-1.0.1.tgz", + "integrity": "sha512-AKJeM5mqO1pdR2/HaTUQzSCm12No36KUM1larivXUmsLx+4JmMuC2Tv0kCdZzTx66h7IH2Xr92DGc9NQsXxa9Q==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@yandex-cloud/nodejs-sdk": { - "version": "2.0.0-alpha.7", - "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-alpha.7.tgz", - "integrity": "sha512-vQkmav7Ke+J24GjyaDhh9wl0/jI5C9y0gwbOT0zVu29GfK8y9BwopcSt8NOM9BGBfVLQwYSMqmTcByPcPx0slQ==", + "version": "2.0.0-beta.1", + "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.1.tgz", + "integrity": "sha512-1nAtsNaCwefM7BP9GI2144aCsGdhQBxVzxZR13N6+e66A1uJH/nxJpHDXM1nB61jveOmDZUH8/JOyjGVLOM3qw==", "requires": { "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", "axios": "0.24.0", @@ -918,11 +1041,35 @@ "follow-redirects": "^1.14.4" } }, + "buffer-alloc": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "requires": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + }, "buffer-equal-constant-time": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" }, + "buffer-fill": { + "version": "1.0.0", + 
"resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha1-+PeLdniYiO858gXNY39o5wISKyw=" + }, + "buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, "cliui": { "version": "7.0.4", "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", @@ -946,6 +1093,11 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, + "core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, "date-format": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz", @@ -1014,11 +1166,21 @@ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, "is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" + }, "jsonfile": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", @@ -1209,6 +1371,17 @@ } } }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "0.0.1", + "string_decoder": "~0.10.x" + } + }, "require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -1229,6 +1402,29 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" }, + "stream-parser": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz", + "integrity": "sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=", + "requires": { + "debug": "2" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + } + } + }, "streamroller": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz", @@ -1246,6 +1442,11 @@ } } }, + "string_decoder": { + "version": "0.10.31", + 
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + }, "string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", @@ -1274,6 +1475,33 @@ "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==" }, + "wav": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wav/-/wav-1.0.2.tgz", + "integrity": "sha512-viHtz3cDd/Tcr/HbNqzQCofKdF6kWUymH9LGDdskfWFoIy/HJ+RTihgjEcHfnsy1PO4e9B+y4HwgTwMrByquhg==", + "requires": { + "buffer-alloc": "^1.1.0", + "buffer-from": "^1.0.0", + "debug": "^2.2.0", + "readable-stream": "^1.1.14", + "stream-parser": "^0.3.1" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + } + } + }, "wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", diff --git a/examples/package.json b/examples/package.json index 34dc4909..3d250922 100644 --- a/examples/package.json +++ b/examples/package.json @@ -10,6 +10,10 @@ "author": "", "license": "ISC", "dependencies": { - "@yandex-cloud/nodejs-sdk": "^2.0.0-alpha.7" + "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.1", + "wav": "^1.0.2" + }, + "devDependencies": { + "@types/wav": "^1.0.1" } } diff --git a/examples/stream-stt/index.ts b/examples/stream-stt/index.ts new file mode 100644 index 00000000..e00c1ce5 --- /dev/null +++ b/examples/stream-stt/index.ts @@ -0,0 +1,62 @@ +import { serviceClients, Session } from '@yandex-cloud/nodejs-sdk'; +import { + RecognitionSpec_AudioEncoding, + StreamingRecognitionRequest, +} from '@yandex-cloud/nodejs-sdk/dist/generated/yandex/cloud/ai/stt/v2/stt_service'; +import * as fs from 'fs'; +import * as wav from 'wav'; +import { Format } from 'wav'; +import { getEnv } from '../utils/get-env'; +import { log } from '../utils/logger'; + +const file = fs.createReadStream('test.wav'); +const reader = new wav.Reader(); + +const formatPromise = new Promise((resolve) => { + // the "format" event gets emitted at the end of the WAVE header + reader.on('format', (format: Format) => { + // pass the format object + resolve(format); + }); +}); + +// pipe the WAVE file to the Reader instance +file.pipe(reader); + +(async () => { + const authToken = getEnv('YC_OAUTH_TOKEN'); + const folderId = getEnv('YC_FOLDER_ID'); + const session = new Session({ oauthToken: authToken }); + const client = session.client(serviceClients.SttServiceClient); + + async function* createRequest(): AsyncIterable { + const format = await formatPromise; + + log(JSON.stringify(format, null, 2)); + // First message of the stream should be the config message + yield StreamingRecognitionRequest.fromPartial({ + config: { + specification: { + audioEncoding: RecognitionSpec_AudioEncoding.LINEAR16_PCM, + sampleRateHertz: format.sampleRate, + audioChannelCount: format.channels, + }, + folderId, + }, + }); + // Now we can send the data + for await (const chunk of file) { + yield StreamingRecognitionRequest.fromPartial({ + audioContent: chunk, + }); + 
} + } + + try { + for await (const response of client.streamingRecognize(createRequest())) { + log(JSON.stringify(response, null, 2)); + } + } catch (error) { + log(error); + } +})(); diff --git a/examples/stream-stt/test.wav b/examples/stream-stt/test.wav new file mode 100644 index 0000000000000000000000000000000000000000..c1d933f18cbb391453460ff233974fe119d07383 GIT binary patch literal 551682 zcmeFY1(4iG(;(U;HAre6W@fZK_L|1b%*@Qpv}R_n*KDuH%*+hq84oivHZynYO75@! zu|J5rd-wkL;v%*yTB^!IWmZ;YWn~WQ+pAZ>JO*iW*HJxZEnMNtC6P#E5VpJmk=0}p zg~TPzn7U#rQReU8-x&BC1Ak-SZw&m6fxj{EHwOO3z~3178v}o1;BO54je);0@HYnj z@5cb4`LO?5V*mGl)qmd?5Lj|x?Ek!%|94ya-=m|w@0O?IYyLaNM6^w``|IytA;0GT zyOd+R|1QrC`|le6f9~=BzZ@L=h~9~}J@$V(mjB$6y$3tZaNGvM|E$OV+}6*dw72^+ z1XNUX@|Mz_mqx`uodnx4DJzm9-N+3DNzq>_l>NE0Pm%KTdUpW7zVj#2-b_v>iwZT?Iz$GY}& z?Up)r=&vmjDSJB(cuRQ)#LFZ>U*Gcm4qmXAwzuS{+4826XdtxI za8frR*i`HHfbwF!t-1h&;zUYnd}v+ya6X1maBuGj943h_?|p z{1S{ma*P1(h}rWUX?sn3$_!dE|Dy#0V!-v_PsEHMMueZ+w!`gDq7{43L@)N5I$ZaU zTK1ZHkk*1=Z-q!XN)W9Q?U=#05MM>K;25p_?f5?X7yfh4rXS-b@`x6QozngzFzomd z5F-AsIrb9dA9InQq@7YfQ|Z@w_7+;6gq&mV(*}Acg7pXFXZeRH?MT~aj(wK?Jag^i zu&)&@v(i3m?Q^dM>X?fJgeYOpvCq*Kjv?SJv!2LxaHXS;eJqYAF#<>2{&tMX@&5Nv zN3OlM7GC`In;oTE`mj@@1^(}24jy&z7BPmOe-4hf(~iLO&$;$E0VRHp9DB-IPwl+qcoN_7E5A9o%TdD4Rm7<6v)lf({sWuD zSnZ>?{NP#(_!2Vs7JG_lgTN92Bj)I@lygvncoTTCr|noE;y>eq$R*k(dZU8<0RpWN z5P}mN^PFhk!4LMfh^HOq;2p;pexB#{vMnWAxW>*sc5Z8VLL>%>2?8CA!x5u{Gy(zv zXZ;BF7y)YuD2WKjk9_1u93^2S4v7t(_81q~me>Oaz==Slcs~O7ztWBvkHr5G1SA1? z6DcBQkK6MEB;k+XC@1`pOQh`O>~MP-0wN?ik(|MwfV7loY17^+F&<(J?Eebbk1s&~ z@$IZ1*rEReI}Cs+dm!ir|A@msC`Y7-(6TNOYm`HNuHxl~-8O%gtUm@mFOFKl}Uw_1Wbp&EpwyzNczYsiT=OcUE zzQ5Ty&(5bUduz)+OT_GZruN5PN$j4)o@sx#t7U$*bbj*&RVLG0%CotoIE9Z=#;1bdES*C*b@S3AlPU)>@X*fC?SR^A+UvG(gMH51dBedi#G8N+ zYYgKDm)KJT)DawI8K4BgX9V0{kKieyPXgi?3lVqlntkl{`AmGLBiOM*zzKX1d_m+8 zP)AvN2}hbJL9ELy!Cub}BjQ9(%iCU_1TaFJ7l0qs)kn~_g82CahnAtcElMJ{R%x9t{6^Q9@3aBH&8*n+8n+m)Zuf*%{YCH%3 zil^fxcrl)ef5J2IGElZVsUta)GM2`K?$d14a_St~ecEm6F7gMw6fYo$(mF#5+5lQ> z+CuPOMdeXHQG*~Glmy+TxzoDQl4-M`1eir{OCJdP(HJEG5q%r;s_6K*|WpO-e51GGz$GpW+45Z77o{n<&F5-^rcH zPUKyZAA_Sf2~WWLf#+Yg zGqx(5$#&m1!M4Em$yRRLX^XUtw=wWj_(7bBAGhtWy|SrnKKKOuEPfqdfe!+3f6@uk ze$WR*YEL>%Dkt&Co5*pLQc4gNrKVF(Q?^icQ$AB-s7I-9sM*x6v{BFpIE#)lF3`F3 zC-5$KI1IyAp+u;Hwujc4rlmfl{!Sf5WmC^nexvwMhEtAEj#I`{JSi4(1Njko9NA2I zOj=J$Al2YU@Bug#zif-PJ+Q`E?^_Zq+2&2=c(cOv-So}$*>u?CZrW=U8owD18hRLN z^_%s<`V`$TU8;7xHbNVw_0h^T4>UV9Gd1xVtD3ACs=2PYsp+lxuHK^d2+d=E3J*5fOFf}2XXpK?*Kz%@cSba);Tzyo1U7e|}Q)jC$s^_RXsYBG!>VfKo zAoNi?tMw|ADnPwlEmp797&L>lyR@IRKDvE6m9DS;i2k+yx&E+zwf=;@K+iC^8e$C_ z4GKedV_&0}5i<@l))?=bZkyj*o?A!T_S-hurrHMCMuJ$gEe3bTAKOOQM%g~v#^Lq& zG*TUDBKa(NE4eM%nLL&(A#b3(rTj(dPmz-!lW&uA$b8CB%38`U${xxI$`eW*#g*EJ zx`KL)dV%^E^%+$|9Yni9lhgd5p3n%W2NVFgL;aw8P$+y7Hp7ePIQKmyR^=quC{JA(<>GuVgOZ`ou{PtHuvC{7&5h2zR;&6&%200f;x?hNi$?rm-* z7vpv3UE)RXpYb6edVJt7;0yU5cpG_Rc*A(Zd5JuTcaJ-Qi*hSCUpY@X7dY!Uy*O-6 z5&H^z1A8ue1^W>DJbNp5f212I~}S4y!GT%F1TmWu9T~V=iX) zV)`;MCY$NR6fki{0pl2>6Qh=Xn!bjUOAMBh%IPoGAgMPEc;N}orcLLWyT zPoGSmKp#MFMRx_utqiV)%iso>2GY~$8|bS+iMDh%x)m;kAH%2MbMQx42m8@G&=nh2;C3AsX4sFC&+ta|ssx>Z7}qGf~C>o{#5tqoWY^QrFvt#4D$ zQ1<}ZkEON+E0~SaK>0-ZgEAk?l{ktE1t%-WjpTgteewo!U$PsynskOVmJ~zs0xPr} z&%mGJm+_tWWV{m|fd}BuI0cv4N^CE|I@HM~w_df*wnkZ1mWP(bmI#a7{KmY`Jitsd zC!5Zi7MmhXMaKQcVMc-RqhXH0-H@rDsZRxbw?JF28Lj!I{!RTsHA6*J?NCY-0~C+s zKJr5{H(7@CvUH_1T>4cqQ^J$H6;BmY#4knrMRP^Ji7d^po3}R)Zw_h3n~IyRHjQhd zHoa}U*_hHuZff7Os_AP}So5alq~;6F7n?JieMGZG=R|d)G2$liGKp9+U0NdTBYPkV zk>8R>E1oE(DtoDVtK&5m%@^%;-C_L%1J_t!%r&XZG%I9t!3UC7kVjHXl=IXXv^Ef+ z7o16NW;8PIu!bXQWDiQmmSOeSGPZzokJFC(kQ>Rn%WKDf&G!(r6SxV){5n1==ptAw 
zxGxY2*9or*_X$S|BZWetRUj7_1(=X3{3ci-2oTipFY(9o-T39aKX{S6o7@C$E$0m2 z1~vO7`vAKm`z02SeMKju83-Sl$|`0~WWEQ2!eIIbxD~tvT1PubEueI!oFvzi29p}` zy|@#8z{a+nw(_mhEW6BGO@oYD{SjRkEvBhgy-*%fER~Ow#Yv^&B+;hk15Ixlr48%` zpL(Apw~B_cxuqYA&4oP+lJj=tj>_@LHe|{(H0fn&_rG6FtxV~aa{Aks zWck;`uXnzLf3bW{`m#9LBQ+v@aaMEgxI*`md!_x$ww0YP+gjGGY;9Ri`Piz-wLKb~ zn|n*N@+)eY4mEAFP9wWR?HL-@J!}=Xod1`Q;rzQxYqx6m*`BC(jBkbio1o31;Sp6) z%VL+sC$;L^I=%I*Hp({h5+d3jY&X3C;5+Z={fhZhG*_cZ}~wKI2I{_Mi}B~caOHLDx4 z#S2tJjC$KGih*_(K2G0AKMo6ESNdyaSB%Dko#whVdlmX!4V)0XFrLx^^o%1|y53($1~#;F`HvP z#|(-+6}vn(KW1yRF+vbJKcLDx-`&&Yi6DrV${E0^WrwqeW4q8ph!ZP_(FQ(68$ykt z1dtO+v+=>U&lb7K(>PIoK}*)iR7t94)dY2;`hn(}cCPNe?xyaOE?%Fn-(#3=Ja78V zQfuvihmz7riR7i^G2{?(EAlP!X37C-25kf^WRx-2A@yh*_A<^^ZeLy|uMPh&zksjc zH}G=+4M;Rc!Jev+G&t! zF?F7Ng`{uuz6S5QjOs&`hs)DT4aJWNnfZfrK4ki)2d7qlefx>?;p$tr*IiyLd}@6J zKiq!*%e~R}w>&I+a`DCBHy_?7ed_gv{)P1^;r-4xlV5#$QU84Yi{!-B)yr@2W}UC!X!Rr1Of-Pdax^ zv?bo@!tWZ;74Pz_OXn`z6Hj#B*{M&*H|?9+6(?M6-6nolOmbvs__Yw9pbP$UeeZeC z^_uN@!lT;V*Bx@3+i{rO zjS>N*{6Vyyv@6snlnNkeUnCjv^LQb?fRsndCp{y*BHaMGnH<|z>v2nsxz4oO7-$%& zf1|smwW-&uzA0ZSla#+HQxvBalNE>LKC(NK@8aR&U81U{vkf!ruGdViURTwws=BIM z&0n>>>PU6&wWZbFs)kf7EgN1syL3<4u!^l!LA9X`ShK$vmNbbwizkR?G;eHTG)-*U z&`c4pl{}Pwk>$wq6|a?tRO{4tG#zx#27gneg<@Ne|4u^5Jn}hGE#3w%voXP$rZYK? zx&nFuxU3J7h(1Nj&|0(*y@zf>*P&VHdTbEehttHl&z;AU^J2i9?7{EP@6C_k+juFw z9lUs69rr2s7WkKPV|e#?(fm#PGyEI;$NY=@KK$okH_76j;!ftebLAWr2jjYNow(lI zZa_wx$ZgGKbE!ZvCFXqRyyfI_M4T$lJI)>Ouj4SdvE2FGM_dKhgV&WekGB)_HILVq z*NPX$3+9FMeghKRLf#hMVO|pN9j^?GP0N$;cWfxx;N;PbdH_(hEqNa)3uoay zZM&@BEnLe~^G8#>>4q`bc+C)QxTz1(C+Ylj=d>=`BO1QulsZuTSk+I}q&%cdR2meo z6x$So74C{^`6c-Txlo=a+ant&bC;>58PXflozex;iPAyRHd3BcD#?`GlWddpmk1=y z;&ky-@pPKT zT$CyLEP5k)DtZoLCQ&qyR!)f1#3V_$q_1R>WU*wEd>iZB{Bxd zV)bXGGlw#>8T}X^>7D5B;9hV#G!)9EjiZUF+o)dD=akVv?z{tbLN@s=X$dJ9@WT~+ zF`kGEajEUQ?KU{`%(is^GEBAgv2~kus5J;EiApU`EC(#pEUhh&rO15KyvE!GNSZ0& zj5*xIH)R<28M_}ks`iG-(1?9-2AyYqq(@bvDw-j zAQ}N?!d+3B$VEI}d{HbG_mn)AL`ttnePuUfo#lD*X$qxcpE6dJuUez_(tOu!*Y?yg z^_luV!LAf*GMhe`_gIEo{lF>y6Mh(+;vy+#N-}jHtvAq2Jc4J^d5ouwkxUtLH_H!s zgN#7y(1n-^Tg|qx*K-t{iQH6fN8W9o6Mr?>t>Ohc1=#|=aH#N*@SD&kY~?h|>4?)) zr!ptVIo!F2^HAsU&hwo&Iq!7d?7Y}{y7MIGvCjRR@Rc^HVPg9nY5Ka%YVh+%nYwVG13i}2& z5L2MX(KcuvvJBxOw^=<|<;+z~lzEBKmXQrkSvY(I4uL;|^NfMEpXNt(|Xm?%~EMzZ+105HH|PS0A2l!PYt~c`TE&< zi|&XnK=(-79h~lGXcX$TYDj%j6{UKs9IUKYY*hp+zRBmvS@OrSX)>2Pow!KG7WA7w%EgL0B$wL+?xt}IiIQ4A3{|Hyd1ykBvi3 z64MT|pXI$}lGSKEWDCaM;iE~-Kip1tQW)Sg2Gh3F$j~}S291M1!x8jDbOn77;|7Du z9Lv1JG&6g$_OWtU?#N{10#b^&qQlXB=sUp65!ejuB=!Mo!uae?;2ge{eU|-#ozIrA z&1@FOlM@L}Nv6Hnx;q#m)k!iOcN$?4|6X>^Qay+lEzQZ?N;& z25cDShpEv_^bWcc9gns~IcObn7g>(9MJ%lMtgV2m2rGyACpdTbGHV%^8Iu|AjC}f0 z`XD+&PX`np4)fp)=rA-4;y|gieYBA@A6hlgGc2HXp}JFbluXJc%3Mk;g-(%>Gs%CF zdz0m)B+?|H(cA&}eV8rBdeT~IePk=gKapglek4A=-kN0oXzXZMso$yZU`R0XOiNAP zrZ8iz{*LYm&>4-?z?ulnElsR;o_3(tpjoU*Qr}bMDTgS%l{_U|>8b3j9I6afK2=02 zUdk8AJIcM}p7P)1W5MiLB;PK-E`KcFC+{Ga%hF^yGKGvUZzZ2BzbIG9dnxvU9i&vD zQZSTJ%9Tp7a;GX>{XyLxXp>SkRhmlhw`jU*Z))Rp#kwQG`fRv zN=zHfT+2m^uXV4r+L~ZHY-<2ozuEYGTtM1IGLYtzmE=7XE_D)hGr*CA+K=`JEsqur zeFW=wHrxm|!(Q~Y^jGw9x)RP*~DDVva!Mu81Y5Yk-KOsED?*u!m&V%hk0TX zunX8lYz$^VucIr_S?EDD50#)B(UnLvt1ojJvynw%kL71Oo88LYue(ig;X3sd4B>a= zr||~}*a8J_6?Z7dz~0Qcz|H3E=U?UT;D6;G62uFy3%2nVurDH7=4|G8W;^Cnh8So@ z-okBQ3r#@XKwd@eLM?+J29K#^T%-4d#!@Um$GHIKf?KeG_(apROQbERpfsjaEW zIKueda0+Pdz8c;bju;XRAN7Iy#kzgkL7Gph)=Hh+U%p>vkdBbP1*iTIlC_f85=1&x z`dW&~0%iB5t0m_}Pn+g8k{S~mPcbW0%H?Ms<^iNFZ(y=SUi*YS}Y+E5$9vL}hEh!z}e@)ese| za#fvC?N{e$I_Zw-pBX+G%S>ELU+Y}kWZWIBQV&TlNu{K&WDA)^9ZP!$>ELhlFh)m4 z0;30GCquxLGR-U$)uFesc%U20WV1OdI85#(u90iu=5TLt*Ku9Bi#U(j-PnWJ-`NK_ 
zJGj?)Df~YK8G@yP*Zg{L6^C;DI5XKA>@)TbE62jv7uW%u6JWjX!d=Ae#XZkC&%O<4 z-H4{4N$4T;8A`@nu{3lr3Zp`_E$RlOsu4&y5`+XG!AK`$IN;q<)?!vP%Z)_=yYU$i zGMF}|p7{)LG0r&8=*tKPe4NV|$h^RO$-Kc_%?xDTX9O{>0NvJDpe5@Y4*J?P?NgPSuRB-BL$y zc-uIs`MIcB94g%_YnHn!164ulaE*_as$=M98{AC!=0VmSw&ysXyc=Bn{0{lh?=lLR z%UJ2G8OS2^J2r@eb7g#mFvMi#+ zdw1};?gRNo`o{al_#(c0eXe`o^Q!k8>q+r6cue$k_bT-o>Ald~*L#Ln2hV)>rEYPq zZZ2J&i=0$WW1TNKZ*Z15#W+nC?ht$gQhXp^#hb+2!hOw2`oen6`ir%U)g6TOU{x++-C)JD4l$|BN=5?n1rujBGn1I3 znR>=9#sJ1)MkI3^)0xRtQ+M2OXl#rFEd~r|zP>BTpbVlQj56+Z^jk%Qo{! zlb12lFh$=^cUYrU8I+ThvlQ=Sb&|1Ss%U-Fs|L7!Ozptx4;A5MLrTsRD~biB(z3Rd z5!FT2l~s=_6r~@Ey61Pyd6-p@^*nog&WD_Rx!?2N=hx-G%Jf=jFYXR@FsKXPrpD&Dd;e zWjSMQkC%}KQ?5~cp>p^sV+|_^ZNRcQ4|)9r2ZaxuYMtF&L)~zOH4= zF}!QNKl=RPyWa1(e`Y{U;He;8P-t-HVDI1?L8`!nz@&f<0Tuom`~&}qwkI(a%x6MhnS3#ReU@M^hzxT&0J920vP z`vI1MDv`cG=3UHkWj$bWnEnh;aON!rI^NSzG87MAgNxzY@L+IhmkRBM!l44%b8x%V zOf%CEpu4?ABhwaBCjcF-k_rL&XcVm%?Hct9C5mDte<8OfuO>ewb#^j%IAvZie(CuoFf;?bdmt^?52czK@D1omj)D@3Uc!Q%#SO$TSzTA zUG{qgzjAWLjj}JrtMjL3@zPRK=BF@IeZQxsm1V2}bW6zkGxJ+!Mb^5Uo_YHG=)x_9 z_X>=8)|_S8jafIdALO1b$S6)J8&cV=dPz+~O?)kp-9amizwN05ifeXB>B=Q{5I z-}`=@1JVP7gZ)D4LvllT;fo@6M}|ZVh+;-fi5wQe4X+7x3(X8^3W*Ip9BK)j8g?XX zVHgy)E0h{KGGuCSry!?5mH%D8DBm64IzY3@?p@qQxPEmx;d0nzgG*1BS|II`okj{) z^S*Kxv)5uZ=p}S7+84cqoM)|PE&v(|1^gB4xS#057@HXb85;T@^g#Mypq)PtZ2{VN z610RyrIk&%muYddrPS?|OXPA=9O))L3irZa+ceh47EkkZ z;|Kj5-BN9eCRQ_DJyeA$Zv$P!OvzbMeN%K}SpD^yr?T#qO^Qn>DIzEIg`?Rrxbkc^tE5|(Uc8oi!<}H+vZNrotcx7H7fIDMoPw~%#iF; zIpw+8dA;)^^Y7$M&8y3sQn0G%T}fPdN#%^1_&QSk!1{uEL&L$Q6`~3WthlD8=~o&> zrf=r)mKPQdkho6b9Vla=J&b!uGW!ybA}n?q;&ROOm0PxZsYj0IC9gK#kG!Y*Z1bh~ zUk#WT)Fq^2m?1ne(ir(TYG(A?=p)gr=(SPm$QhB15$O>fB6X1`qjIAzMj=uDk;d?} zu-ecCq5VQT1L~!Ovm8Tb&O)bBK{nqC zv;$Xl7~ys2VXl9 z?DhBwtwLM%jhDvuiOG#(fv_#gE&5yZjhN%Hu5lEg6DP%PiTM_NGipSnTLdG#Jalr% zy`Ydlv44qQs&ASP%V(pv*{i=-PtQVkh8xEv+UclZAb%<^hkKH{9O#ioaU$8N=ujk$ zIfCIwp9pKASI~8+7-|o%1$SJn;W)Sl%mETZE^QhOrfs1XQASe2!CiS8x9Te8d4)m#yF5i! zBMp$wl_*7VO-Jg~RaeXVlq47M^JeFY^A;4ID(+nhl^?1YSJAE1rJy-0FKuI+vN3aVukYMrFEddO$iqLy_T>nV5Mwt99=Ae5jC9^tw2; z)U`aZJic65VXf?4bEYn_@sOyq><^`zCQ}RR>-8%QwT5HHJku46!uFDkK<^p5(E#o| zL4dQb>nJy)TTl1N?(^M~-1~bR@Z99h^D_mc1$PP?711f`OZ5JjxiRmf!=qJE`Y3Mn zh-g)Gbxd*WhPdl-bK~@}t78>0gJbpsx_pV$Ms$z39_|-@E38x4lhB-yWx-bhZ~MRV zHF;0>!aXwG^W3;@n_Lms6D~}bS?rj#)ka-QE2M>heJLM$1mS>Q?WvzBOMq@>3E4tQ zAw3~&1Xfdn@U6BrR=rth8e{lPe-milI%!=r64gB=qR>fe#eGD)=BtgH>JQa~RGunL zEc%i!$UjzaxtLsjr82sDUUhQi^U}TfA2T9Svc4SqtpEJsYh6l8dh2XTE}qkveK0FL z^Ju#0dw!}s)je%X`tXcX8MI8#tTWjKIj?e!d4j@S#XU;prLD^H%H-ufl?$q-){LoF zHO-TZmS-wu>b2ScU6gKsj<4TlSZ$(PCzE>9A{qA)5xaudP58y>fU}RwTbCZLJ6u1w zZg+F?=;(FLXQ2O>paY@(BHW|gqaQ>!N1uzvqs~S>kAk8jqX$K2L_Y+)5*K$rZbZB_ zKCacJ_?2;J?6GKa)U1f`@MmGxu+`z0!n=o$2}=vP5JV4H>f6QpvgbJuPY;Gighvk# zcaM|qkK87^9(0}}^yde153)yL%g|aR2|0qSLAoGKtPLy{>m)OPd6nV8*hz1IW8taL zby{!QP}&)qjMfX>`yyZsONZJ(yJ!W}Hq^(!#zac~og72Pz{&Y2Neyg8l58KW^DW;@ z?Tt-oc~&r@-4l6_KMtyysf#$Y@e*r85L=yG)@{ReQ-vX%zc>$ zGmA1eXSK_&%U0)9J_l?KkALpMG*o2pyx4Axb5AizhqxNeZxHNco zXmogN#MOwbh}?*45d$Oea4tyQh-{AP5VI}zLfne@hWPUM_VKK^oiX(2)X4gX84>Y%4fDZw4o+-;oEbOkhVa9q^0^-T_a8rvTZlBRm?&aSFHutZn_E zNSY6|3*{WS8##)c2y_`i3?K=&17Xq<~+{Xoa2)-GiO&WDSvi> zs?b~k@C2zJFr^mp{dsd=(-r9&3A0uDXU-? 
zmI)QJ1>DErr0UCC!<)pf5Il0a;&RKa&?DG;sqaz$g@Lr-r6JElUx)PzUjV|HaCLZx zh#e6Tk#nQ0(Nki(#odW}7q=wtcI@GpnbFIl3L=k2o{lVv^p5HTcAsUD3nNJ3@u4xn ztpeBi`}u|Wp7fdRvjv=^&iDlTbnq_nboSu5nVoq~>%m>@LY|a+m%D>IfUD>11A4|t zb_nKzh9M(ZTbUohZ2@7^(T%PGIdZVAt72*8#fm+ps|(7rx~2(JB9cFUUHGl&`;pA3oK?B<+}pXGa=vCR zOz)RAJI#`IIDKYDcIMjb3pr@+&|FRK`@CNH&+`il3X2paqsuy!Ppv?z9#`vX+Sf%l z$eOamVX|s@cjYS8K=li?n+DZZ>82WcSXSdKY98F1)rekTKj2IPxOmUia3Ans!7Jfw z=eBOGJ$w5U_&pAY4f-=UIb?F^q|hFr$3nY=)rE}?SA<(5hDL3TULUhOc5+;0+=DnY zZcyx=m`l-DqiQ362Xfl#$k#x!o*U&86&Cppa7u7UL*Oa@9AAx3wvWPx?>or%yzfEZ zb3Q3vn>_;D(wt8SKk_etJ0xcw#?x{0xI4LJoKki+FcThxE=DG>Ml+8v8tAd~LvRB6Tcf4f!6)j1R=`+uGPlty`>+b*g2Ld7g=CT%*@%_iGZ= zFj%qfD@G}<$>+#YB~jwb&638t`VY0AtJ5oYlxs`Q6)_5j6)Y<_Rajq~Ulv{YwKAbX zR&ujIo}HLJGo@GZ*yK?uYtkArWjRfG3-V9p(Q|ubAIfB9xTMFXPtI`4x|J=>;pTPC z?^RGz;8l36a9`24Vrhw`bVxa^@_kis&Dh$^y2^$L&Gq7rvdM~ZDvqX)R;L}Ld!;)L zjA$w5J64pepw%;0qvJVCc_;Xv`D6Hvyb9hBes_UVxZkDFJ@2?;Yql*=>RIJ3%(@Ik!8v z1K<%7Z$9sLpqZbH28?Qgv4*C|1hNvbVC6vevTSq&viu zn&TUT>YHoYSB)vJD)B1LDEw5gxL`NfO=`;w6?ZF{mBzAO#cp}~GD1>~$=2jcDKo&j z)i3v2{_uih`QCYpa?WPeWmKf=(y`3_S&W<&xpjF#1-gP6h1q}}tBU)UxRnks3#({b z)l?l&JF>32{z2oSW{$X85-(3x`f3Wbs9tE`8=8MY}Lj{H69 zRCJe^3o(T;DKY(GRzx3D1O-wNS1lZc`q(#v*)a}#{6dAA-%_9YpqVeBsC#{KAsb!;uV|iwd zF(amKhK{;%nj0z)Wu3f-JQxJJ{Dlmcu9t+1jx~nXf2h$_mXvvx3@_SIxU$f{=wor$ zviyqKRimqJRGcUsR5&o_Qu?~o5h>*ysM>G+l3w7bi*&%Fiex z)%$_@`2c;8;l1Iz@s>H(#sGE)sq_g*JGPNi&7BMGfzR=J@<#FI@@EVGa=zo5=03(N z$oCKbiGdG;GJA}IzMVrR7s>h zB0u~yIO`t?Z4+7%m1h&#>+2R^&A6EfZ!Q1Sfqi;{&h>+(bW1zfJ#> zzKq_Ho(4|+rGQVyQM}1EaGL+qHq*AqcGi|<%dicx#aXwTw;E^YM``z|d#TPVCo991 z?-T-szq~-|D~S~OHi_z|)gG%VDBn_=QH&PpXS7 z7H9_|%kskiiI8eTWi#eYfJ3X!spda-7=_At<17a3le=2;(+ zgfx^smdQlgqf#^wV`DDZb*zScjhoKz>*V3e_sH_H`5f`P=|3ajMZn{L!vQA)ngcck zb`5GBJTzoe=!LL{;cp|}MxKpo8@)Mtc64b}X5_L6zwp?wJE1E>mxtaC<%f+6YZG=V z^kK-^;C?|b0{r|-eObOoeY*LOeBOJ{@P6iX+B4AOs2kt)j`ISixx(jy-hvXK_e|nB z@osSAxZ5~a*#%e|>@MmFaz7$jS-gL@CB49P&3`e+zjsJcLOcddvpRe zkFDo0_^X6^=b3IVJgU9U`6T=9_lxs?=kFHKJz#f$e;_5OIp|67T3~bK9QG_MFnn`( zbNHl)*AdbPc|>x=;)q(XPHhZJ2KunLkomz`L8F6;0^^H3vC4NnwHK8^$%gXud6yi*uxD@_40C zB~-81uyyD3^Nb(O3vD72gKDNV!TT6{nYBzZYXpmm(9qe~0M2C|RfsyjaOv!J$GyaE1 zKjY1d;dKBj*iCLA_ZqOQeh%(yA7lNo0<Q<1?Q77;_uDR8Yk9ouKiFGR@=62Vg0d&C5^ipr!|Ds9k1?D8C6axRTq6J)D)VE zJ{M;dKLz{t>{4dg=F-k3t&3k5tt@gcQWlOV5)_{;zEQlkcwzC0;_t<7CFv#3WzWhx zRxYnvSG}yJSFOJGQ(bL+m&PMa-+>L)WeG!u$iK_)DHO`->Q@@MwzIy%@XeHJDX>{c zAygi;8lFOTVti)I1-DlxSX+@A^b~t0H;|tzSnWh|8Rk0AZK8Wy52?pO&-b1zuhCv7 zyzYCYdWCt5y~}-)e0}}a`8D`W@W1H4-@mPYzTXbN4t_bloxnO}@jl@Fn|Gtvd9RsX z?YxYh$2|F-2Ry9q{oR+i?RWj);_Y(M*~j^r(=ewpVRzvnK`uXpf0W1J9pQR{9ey## zjdO~v!Ukid=m~Th+6v_Y3-xqlHn_9luoeLNoMo(FEMu%;jAzgo`+%p#DWI?H0nG#Z zi;OavVj#aJZv(dbi%9SAIQ$RWS!-*{ZPPU4BEx;Xw>}qGGH%vw&|Xn5Q!bN#kq(gr ziVuoT0vn_EqQ#>2qQ0VTq9@IJo7jyz>YHoZ)x52eRu)!5Rr1P%m5G(@D!WvYE00x- ztk_(UT=Bf3OU1|Xr1IQyMfr#Fb>#~{ysn&95m|Ak;%a4ml}nAFwz>9I-H!U!4ap62 z8V5A3YyKdr6DLb{OGnB2$;TN zkMkaE&jFr2J)=C^c~19S;hE@J=rP{|^SI_7=zhvA%1!3_(RHV5ylcM8R+la=m`j%P zZ09ddxUjSExWK@l&6o1d@)q#g@bbC+xz9OWIC5ZVHkZw1f5iR(Rt^0yIap_l0YOA;L02l3*dY8+*l{$`9fjc&~XgcuMYEZXstjrz>YH zXFi}zEqf2R`SxMgU`Mg;m<4?c>{(`@-B3Q7ja)$T3D{`U83|#y7@$Mi<6CdIJ3s>;m6~UPCOH49+%*w2{>7K$7++e;~P% z&yc5)+mgQ8z5!{v$g;uG(IPP?n8QqcjSYq~h6j2Ju&{owQUmGDSJ6>!k@b=92Nof# zg}VmWeLmto z0X7IbxeK`cxB*;*>jCsC8-RDtaA5bD#Ocd%0yaY1Id3?-fQ#~F_9HM4-hzARZ&(55 z$)>RvVNEEE9mmqKX;?GL#x7wwST}4Hkk-1R8-Vn(6WszRcnwjq&a%o_ivSG~RD$e8 z9<$akJ2FntUxOR9PmE;7HXuE`pnKCd!@e*CXG7k=c~J%Y$r31?D1#|$fPYd43XePr ze*^4A+5-7ul6jDMi+Q5?chf-Q0mB}{Tf=f7O&GOvHFc^vN+%$1KaxF{83El6%0J61 za z$u#XW_qTXjo2*H;(ZHkeHmN%~mpqH2p{%AFsE27?AuV(Q4y4}#79z(PO^jIPW@b6C 
z=(@pzk*Uag;7PR&EkK>I9@r8r2`k2Qm<2cf^Kp>9m_44|osF^c0JhzM7t|^C zG4?ICp6$lD%`U-)W4kdSJCwZ*_=CJe7h`SNzq6;YhXaqP{p=3x7ubAkDp>b)=rv#= z0%0_41+dI(M!ukDut!)cEFFD}&Oy&11%SKeA|fOk*?{au>XAys%DTk7z!<_9%5Vdk z&kf)3!)L=_2WR>1U~gu&tDSlYEpENNOcQ=}~DL*+ZFD z7AxN+m&iLPHUJCSp~`yYa^QQnUmXPe2c5J#wS3)09jc$9FV$}_xET){$)?k$ab^!o znq`Z%J@B!d417rkkSc*SRw=my@Zxl(?xHqOyU;GuY_!499dMr|fw$8e>21M2ZesW_ zJ(w}fMa;{<#^MY!g&D+}&sxVi%2KfAA%~Fuh$pfR@kM)rTjhEH`+)q8o3D>ELc)PBPayE-sYW({-SRv(5BrXKqfd}$hzoiQSbu*8&r7HY zjl;4~1F!+yi+F&V9S{cVE^`hm3`u6qWX=LsqBohPK+eGF9L6iaY4_>V;3|j)e}Nv- z{s1-w8B`f{36(;nQ@>MAlbuP&@Q$QpQYb0iR%G2_^|5xbtTazC{{g&D`Wd(D>vco) zSM_uB{lNaQ#=tZr>85GxG)Wp6kl#-$k0~z61@eXRGm2}<3*dw$mGc$56nzx^<-27^ zWnW}HWFw^%U@5ssc31{9n^L{h73e9xO7BQNNzKw>vOL**d80g1F+p)p5v}}7S*0YY z5`Z;srpgHXC8w)zsH@bTn!TDfTATKZZjXMPA<9@`9BaC5GMM@RpR$)`nYowclcm2k z(>mT}u$=@xuYZyl{SJ?&wBq_W=vpLz*F)+v+uHUrn{8Zcwn5M&(CfonEgxtXifzpxUSEtkNqJ zRk7+Rn$cRdcA##VZo00cu9a?r?vhTfo1{k!6^0{5i?OTehN-_T7SqrQj z+ZbE6Z6+?lCj*Pv)8s*v9EvZn6fL7pq`jr-X+h8sXe`tg3WC-^?En=^=pz8-&M?+7 zMuV^#oO6ybUNYJ}qm_Pgc4Zp0a02v*Da z5EB|oJ4Ag2*2-Y=VRAX8609ybc>-xBZnB*Qde?euqczVu&U(kv-!j6dHd`8{i?G3cQeqtCgyr>PodxvtK<&<*M{lyqDL>S&9^S zTY0LiLADD>>X>r2a+p%A*rAvW`13H}P=y>(yim+i&Qg9+vFt>yEG#-r!Ft|e-3vUDdus&d4 zc?0+{n;r#SWRs=#r$jEj?h$?^u<2hjKT8fNAhoU2aoa2V&04ebr zSQ(kvGprO##!|svmWaJZbI_aEZXo?!z&?Q8E)j6xEA$!C3%QC6LjMGok(Uq&>o#j3 zaudx4cFx1l{jA@C?c;nVkGX~s3G5%o0}bF?dINnf;}k={h^HR~H+^KdFVv5Al=^^r zmzDxapi{Jc6i+f0_>GBxckDb;1Zg+vJg`{c+CBmwr83JP>o9ATCCD<-Tmrn^i%gqL ztAN$=ZM_?q|7t^(K1g>(lc(OI9;coPY>3AJ@Aj^m9_rq}Uj2&ly7C~Pl~r*>5um7% zSIO&vRkFYQn(T;7CEF{v{13X`0zQhQc^{wEtZz25ad#sS+}+&{x5EzSa5?Na^x%*~ z4?Eo94)@?r5<=YFv+=CY{GQ%nUwGf||DXP3cW0(&rlqR7y6dTGNZ9zQrByAeLaO?} zTjWC3jjBym-K#oRU8$N;t*Tj3bH3(M%_5+ki>rs!MAeGw=GXmEmt6OAy&W*oha33N zFaOlmY8$a~{->^!{+d2fFEJc4Ofm{hb1@2zFz1|aLk}ifDA!w#b@?6pYPlJKt z2hdk+ZC2Sd6Zwip+B^_O3-1cc1do7H9VtkGgiREV6O{6vfD^sp-Mo%-k29B(PA!KA zW?#ySItwev7;_ubW8)3u7Nec)ewTuo<UP!cNNcOx)i5>NYYS>yf?6)tJ*hL-ZNq5z zw4rrlc4M@5weA`8;1b;;eSqPP!OwWq*uhk0+H6+Phv*RI3gSuhrgl;c^(V)J`x`f( zJCo_;*a8c;{Cw=Cd0pfrNksTEOo_f?6^!T z>n#sPFZ?YXCw+@G%2BdL=&N|6b_1o@=nlOhhwEzFB>6qmVOeC6qk$NO7xNi z5)tO~@E)ur6?`2=^eq0*{99NvIm?yt{>C_;$<+f3 zF$_qE4zTl%V1lqtzs4M35*eo%+zq+<75Wn0TwQZZVzk?}n#S&p2O4VYSJbzwm)38u z+gRJS#=rV^;Qbd??W)>XWmh#y^Qkf(*wg76rDh9!fo4_Ct<+Yus(6U`+q8h|zspH+RRT3dat=1lGL zx|sT_4O1E$8s}@@Y2~`!n9UUF=D_zc1%8e}rcc1OZl&ilO{fplL{2??Q%kumc$;`4 zc=Z1*n1LWL%WU#6i+V5WD^3wF$K3CZw6*MMZ;# zZPm8F*?OtAs1B>v!b5M4YN~1s{CD=Kj^L=5YO$(V)!lBmT_?M?c2RabyZfqTDnG1f zX9PuKck$Ivw!ry_9lmjQ3BKSqHjX#z*mutuE&OOXM$L#^+L_GC^ z89^IN+f7r=Z|LPrGCkP5+gJjg8?Mzi25FmU&o-7fT!k(e(RjAuc71q#2aJPGb)#z% zt6ZyMH7hg`n$a4qW-csZ;TmqGN5zTqRpnR8Jt}5aWR(Y&MHeR({#3ZCD5PXvX;j(1 zvdS_=`N6Wk;16)SWJ0N_R8jV+G`f^2?OZms>_yp;^5Ye)E4x4-1Ig_Es zgz;vBJ{x(*fPEb(tP*ar=^+Xe3naHCGclemmJh^C?xmtu(N(!z`K$7b(%$x8+oq~t zRe7qVb`tv`_5^1gMcxsKa-)Q#~t1UCo*K<_OR0i8*c(OOKU97yT7_8{4n2I$Qq2d|*LsrP;@`y%8J4V}LAwDt;%@ATCA|Xfg_$2E?uCjX63OcCbGMkNF=E17;n3b|!E< zI72z-5O2hlvl7vIMl>gpL)7d2j*eU<9Uq>4)wUKO_GwWVWATbIU^{9gP|p>M&@`BU;ICVz?r8i2il$MwNTUJp%r1Fx+P}QlXZ*5)e4|Q+q*48g>SdX#rA6=Zjz>sZ>Hzk>k zbO&k&r-0jy|EJ)VaI=j_G+J~{q!zyvKaoge`{b>aFKyL!{`S@OTOG9Mh3$?n98Hd0 z)#ugioa&tZbspi8?lQpjv}=kh?b-qBn0hxCcY%AgTa{au+diPc>RmnDwz!qKdASF= zH@Ho68}I7mqIYWR^h7;NJxKkVI#XStUZeg1FH5b%RR@(rpnZhhDAj4K(Og$vQx<{e zUMTx0A7iG!P<|Zi%8#V6(ge)a>m@@akHrz(K=64}q_!MzP#tPaa zicc5dVOH^Dcu~CTh)HynW5=0Ey=3|$f{Y*I%_Px%=*Q+C&GzOcrXP*22+Gt-cTCII z{@D1sA-G|7{mQ!ewX14w!>&2M>bfSevaG^f?o{5XY+mU_#M)S2tS@R)6kez-aLMnQ z7m&L>`)%gljG-A%GWupF05P*XCo(TDPnhSO(=;nO<3RfIw0}}dQirDx%Ve@7Ie9r$ 
zUeA2{0+)h^`RDSz3gQb!7v3rCRuqU;Gr!V$_yBlS+^;-TwV|eM-JkV)8h&hy(CW2M zbo=!83`0y2w3hjmQ^lRee=dj>9<})e^It~NL>3_*tVmIwQ}wss=&-J<|Kr}nBhO={r_}4B*N@)*K2Ln?d|UeV^X=-D zS*?1ZTB(w#euE!%OXUp3S-Asxj4KP4c9Z-eb`W0^{Q%6eH?$Ml=B~{Wo9**IlZ;P_wCeUey-Oy~>!1d*y$_Do|I_q-0p}?joO}s>0t2XBYgP zw=d^hR!l}*S`2UkH2>Q&N&w zC&wobN&O-Hea4{7?U|~qlUe7pe#qLIbv3(3?t#2R`A-TWigp*DEje4-Sa!UknPyB? zYiKaLYVGQeH4N1D)z2|bHtXo2)LzabZanO36NU9QtHtjn_0n+pVMQz32Gv{pV~)d} zUOSfpZNA;j$$gW%+5Mcy3eWCdHC~6jhx_>WHu%2t+vndY;6?xqn{>}WhroXWeh3H) zkOVyR@93Z4cg^pr-!{K+zmL9$edqaVeS&;?dmr**JX?A;_lR^K=(fOhxyyRzyG|ia z7uAmHpB-Zz{2j*H&$3&lI%b=ylq=gR)?;0Bli9NaZVx1)d^i`VaOyBqOgE-Cv$zyU~TDS^xoW*W2vj*bCZ0r2-Wa#hn<+hK1}}wQDfmO!+Ge$AvE-JlN)e{o zWS{K#)XC4Kv+F3gAKaZh9(f%0yz6zxdxDSFN9X&4|Nek!f!e@JK|6wf2#E`khBgW9 z78)4(DP&5BQwTT2Hl%6Dh>&q1jv>o~&jdXQd>`=MU*k8^FUYURce!sK>~Yh*dwW%T z>~`<&rf{uuHaYn@4O9Q(c*B9QA7;M?I!cZ0Fd%Gfm41pS`8?S}X@Nu|_7xukSIx1x z0Dr_lVL0L}r2_kZNARQI2_m^&;4R{H;PH3{Zc}(1Gu+{~{8D3|1Y1d9mp_BA3lDnDxu_|L9(8OB0c!&#jeGIq6-CM^E>1jbDCx6W$a2jm10a9lW3p7OZX-6XtI0i@2N{tTc&nO z$w(X-UlDWWT|UqHLMQkn#U(vWs!McEAx`7ehaF2Dq8#Sg@3q^bnr0iObW(Jc@001J z3#D4ge90B@V^Ok=lg%vQ9f6DB8UHHQwsylKZ8*^AUHDwY2(jh;#qGxBbJIC-91SM| zd^VTc8Cdi$u#}$UEa8mcOajNvv{@3P>sO<(aX$wJxRiZt8TcGn%GPA7o@aq$@A>Ea#YTk01aFeLDD z(8pk9$kO0MAPec>UZF?fOVB&ANwbkr7o+w>wThbEtW8t9$UaTZN92X8!!L*34J!)M zgk^>0guMv!2}=lRADjxgIobcAudk2M%fqYEqpkZI*9k63&O@9hIQgn=9DN*I?9Zuc zl}i*q$>8**^mnzvaji7ppMqBZ`d~KPs^i1*GqQQm6 z{C;_Ba;maUWj4tOO*@)gk??c8TU^bjbszuwI4ve8?n(T{FYCU1h_8uz{>kNi>o>bz z9eJsLp?Ep()r&XGyVW0Femo!ZBJSho=AQ%O0)c1`jGrH`{^FXbOYW7nCVgp!Dl0N4 zG_NW@uqdK*OvNkB`09e1&vj24rfSFN$C|iQC~v9YfQ_wWfb4g9Q^i!}F;yFfh3XZ~ z0=IUaV|@PhYaZ}#V5i^_AvZ!yA!VV%!`DRgY|=CGQBzUW%BTTR!=v(|_BD5Haks_m zmc~|pwQk$GeJhWaSLsZG$^l*A1y_dd;NP_q1{q&D?Fl{h5nj`2vv;t8$CQ?3F$#aBlZVdNl z?lSI7d{03n!QI>w+$-GITr+nh?>>*icSL--m%K?pFlw=`(hSz|zVz>Asrd*Z`ut$L zVQ@y&F)!W8MyH08b>6k}tIt4k?k|rlyIc}koLhLffS2DScV~9btg;ME+Q^jO6CcFC zi_QCZ`@QDf(f9p6{u1*xZfksg{P54gF{9t#e7)$!t*63h|LBURb6+IC>h-qfea}xz zV)naY-siu+6ekoV?n(|$tICMTj>t{OA6R^~%(F5>^S0Wnu35vT z#(%ZX^)}|)R2A=naIN^MEJbNnsiEaBbUg1=?ULn|;IYvAtl#CpjNpZ#;_%lI;gKnk zUQLfTo!`tgs&7=MsQpo!o8N5ltYu8Aur?3dX1DWgf4JSYwp1Hq%csrXHJjLUd88=P zKk|O$NJO~GkBnU4FpDPI%ocVK*z(U8iSmH2?_N$L=U1+jx1ML_9pncKHVJNH zZL=L>70y9)js(FT!Fa?Mtm0pXJPt&RmeV|6ULS4{Fpx$_R~vJ@@wDNLK3aEOJGqhH z@S*NRZO57&z&X^HcPU#?vZ`ozL1x~_+)LT_Gq0z|r$(myk@RoE#(1B&o1Z3s81*jo zP0Ke|-UPmL{_yPM@lV@6&H5;Rzx(x@=jqWuJ!$gf_LI8kkmns&g{RwS|!Ois0JdGL5*~1GK zI*2>V)+i6!O>%tWlVUT zEhn_{Z%wz}-R63mHEo8rnb2lCe#NvAwH?uRblZR1jB9$H$c~2*G$LLMQb-`$HMM53b8@!wNnsLu2>g| zcoG4I-wdw}O^qg_1knh;&}W&Gu$Oe@9zrzFQM@ExXGHMqBk&W_!Xq{`tRy!?X81@( zAo9*e@jmffaez1ru?cKMD-osR5&Zbyzz2K>xVA5F`Mo)zR29A2oNXLuQ0QaOuZi_5 z>L%58t%zDc?1yh?ic?nSQ`u`l|*;=CFEcJMpf_rJUszn}58?$x#zh0ppwd-AN$i>z0QcfCJ$ zkM)TEm{5~6E#>#rth7!UJu;P9qq67bCg+<9l_gKhnrL>_MAmO?e5LPCYq`^eXGGbO zU*(732l2$VuU$*WVa}gjJ9!ND-sk5Ocq^!T$e^(C5m%cGiF9bXxmow-cUsi6T-|zj z+vDx-v@^Bs-u7ynnATTY*|h4}D!-Li>+)95TYYZzpp|bcCrH~BQMa2OZ?Y-;L1<{m z-k{uof&Mz*V?G)$ThIC6PNT~W=VeY~)OL=Y?Q3jLDz?bxOI$>c1bMvYTq(C3X9Tr} z31u?rp7e0@XwxoZlA%^#rsM18YHgr-bZo4`nsG`)eM3;=uZTf#TH8+dK{rzW0Z5M_ zhzyu-7-{@u9Bz`CZRh|dlCp=klg53`+rbYPd=zXGjzQdy&!SD@7Lu0|Pw5z}w^c~n z%I3)S%D%{2$hW{=yjtE>&XwN)4&#wDQhG{aD_Jg%13t5p%}n7wSbs0`HghL&dQ$Gp zTk|kenqj`aUi(Aisd}nzXHBQ-N~}n`Ra`IYT>7*)s%U?KCa+KKaxl0lt+!%P2WQS6+sh2W`wQ}I~qPS zqBP=i6DIOXvkuL-w|Lc3(Au-jlQ#F;1h(neI*A>>b$uiY}Ba7Wp7vm0VJ!)}({Bp|64+RaCdmK4<})hk;M+fmA8iXHL`vUjj) zNhRUp0kCzn7SNyuNj^#Y5(^XZzr2di_-y}K68CS+ zj!&{r4Ij^ctoY#mVa@yGcgb%rzxn+7{Ohi7uDs=b2#z`Q`Dj8+QnQr!lwB!f5R>wc 
zgkKVVNPLv^H04Zs)9jad>Y_EJQI&72AJn-wF3^26+%sjH4xr4+@wNcsnRFxG+Q();+vQ#L0-_h-pnc;H}Z3 z>B6Q5o33qI8mVhy*JOEw4t_MhhTRR#4tX3rC@4MPfd5Fp6dxb&Up#$0Om1IX_qa@S z9^*7!-O}-h{Tr3D?OVk^@?T^;=@{{Gn;1bh&&XMVC`19kvJ65r>z9bq(}r0}?>ApE zoiP4vn4rI|TcD`Fu*O_w$(Q~ETsoZ4lWS$+L zCuolK#4M4oq`lNd_8bUrt$ej2NfD`hqdaLlUu9M;f~U%M`xo|S?Kjz9wb$C)J5<=e zx6ikiIh5E>0*bD;T@_;OowqGQY-L1}0fh==Zqi7Jvp5G>_)~&E`LlSDTqQ?IiJ5HZ zTceGRhKIUwS~Wb}qU#pa2GuC4?W#@hmXzcdbBlWvZ7aN2kdyC`KQr&| z+*>)1v$L{%vleH*&1jnOB7I%@z;xgA^0YtGT++^^j!hM(&Pv&m{03e=oszyFzQDPJ z=?NhT*S@%Yc^&^K{z$xQ{M64WakJu1#x9R}_-R4R$+)#&rX_7kb<5nE^B(I%yRnv= zpXZx5D|c*e?>ufnw<2RnuL^E;eBEsAzlQtfIn*WYUj8&;2T_T5loVDY#Wm$6+ubU; z{Se1=r#7xf-KTi9^BwO$GoUQsa$t1O{@@Jw8{~zw3mXuAI{aF=Fk(qWM1(2qVCch; z86ox|e+N$s-VuB`I6PPvxCzpHq~BQI_CC(u60dBJ5$-cxCpyno|LU;SZmaEWMMrsa z*=}i9sRxj2`y_>kF%W3uD$sCwoFmLNT0-wO_cD8!^MI!)1P-l-QDBS)4yDd82XQDJ zOe;+i__6IUcSm&H!R8|K0eS+ngev3=fX(zi-$(F|pr0^7_%p`i1>y~oxv-UtlTSwP ze?s)V_9}th4!cCVF#ESyS9W(??D){pK|M*mNIhOXTD?*ISRJQ6s~)DVbX?|GjP?I3 z_P+M}5FN-t)z)^Q@|q%1{sp-FJ(5XcThSHaXm~wJcVB#Hv8FvJ!3boIx+1i^W7(wA!zDSzUd5w|4i?rF%qXZqKlRAx<-g3^mN!1H zSzbZzh}?rY>De8!FJ>vT)@1r;x@LCDoSC^NGbeL+)}5@_tjk%;vzlZb&wP~#!w`q8DOyNlO;pQC;w1KtGQ4B`Yg1Q~<62VV#_2M-Q;7!nBYt;&#AArpgl z1icER1NsF#_n+$@=Kle!gkyad`~2p;+N+Ca38IybadmaM-N^h*Lc0Prr6Ia+J9o;_Mo%D zi$h*Qesm8tgq{v72>TeeJ1in>SZG$TbI>mVz5NIIP4V60Q|`Uh+a7C?k)Aw{Vz&>j ze_`xs=47u0{^|4U5I&h%#?tc znUA5R{tv|WPlw(Rq8$Mo#&g|eeX?P`sVA*thI5v1*8}eq%P;2J2-aZ_ifZ86YJjA1=laDe^@VS=!Y16 zn)>1Oj}h_xL~RCW@Hp(``l>@!eXE?RvNY>70<48MRy2kGudpn+*8j{`F>80|2wf) z>VV9s+&2ZnlB;EXD*izvxiuB;m42F0Rdv-r)Xi< zUHZD2-4A$HdOh-X_Sx>U%=f)t*MJX!n}a8WmW90zFAYBx-ZK1Bm@sU0=-|-xp|?Vh zhPDlT91;>TBG@PBNI;@ryw6FmDITrdT;R!`?GSI5Yip-mC+{YkAiXY8!|NbdBo`$L z!v(E*Ejc}y8RnBlclbc})4kA6)egjd5cay?bh)}QdV%3F{3{L_D~!`jxuz!OKIS>* zWb<}ajLZCzvL<#~1Ji@MdxQaQM_)_zN#HbvAk#I_dX7C!VHlY`oey3D&6n z4dWZSHn^jgvk)CIw4n=*1~klT*pB@+7B~EiD6Cf+9yNSIOtuM)j~ks3pVeLWLN`gT z)1NgAH-0g?Am--`(|oMrW@F5aG^QBl7#j5Z^@H_}`dHmkoeFCY{j{GOTfisgSv|Mj zyUtv5sd`kEx2C@0Y5Cr=-%Ib5R2K7#&4q%(rUirYo98h(=IqGqzF9$;E*Z1aHEEG) zzNrx@o0BV&?2{rAPbADtP$kUzk_*qi&0jWr*&V<2^S00P;*;ZT;{!h5`^-(anDi_~ zl%9~)qTom=q6SyDtrIp15W#S}uARQl@YKYo7ce631Hp3f4OyKs#opOD+HI!iS#K}D zO95+xrUX|6UkMo(Iv#P5R)%~DnH730bSvm4Dr_cvTAzf(2Dc7g8sruDr~f)X#&@c3 zgwHvzp`NZDi`+b2wyGT*?y7>7OJpVDYc~G~;(6V;Z8+ztHq>KgIx_>iG4w%v|GTi@ z&NK}-R_mYW^w?M7W8;Fx`;BL`_jTv>=M4cS6`jh+F|*|GhGUnTVnH)Rxw>m}N;Cp^ zggS``8$HPmG4C{;`qVI6oW;$R-fr!(x*TbTCD8>~sHu=P_TFlnF7pUltAd!U1< z&7tO%<}CAYx(t31J*iWOM{|^#fqjEkFtONi=B&A?`GskMNo8s@+L;EKHkmG)l7UU^ z2Hnio{0f?ygJ~}8rqlFebQ85>8wWRxuRjhS(P6ba5dSxX@v+J_+v$L{Gvs1En zW(Q=aBi6>+EIu@8apt3p2^r<-|D=CO_sZy)5tI?0u_EJ3MpDM2444)(HX(XcMpjtv zvVsXEgDM=WAJ;X}HtKI051M))O6V}AGeuK8M7g^p_|--x{#Q~ayM^_v-i|3w>k%Ji zl2?}ZeV;DA2YoO4PWNr_Iq9>(XOYhcpQb)8J~iIUy?1*p@tp6m-~GKC*KL-omusWT zK9^+YY9~jhHfkrw{q`lQVYZ(Y{&H7ors$C{gCD^=#hFgsMAW5n=4Xlvdzc7OnWrIE z(*wlcen|DCVwuU-#11W0TFrIY&3O6?D_V_RP13QLuBb&`jh%q`u_Ts z`nLKOdV&6)Za&rklC<5lYZ~t~2ph)M@2~q%8-$2RVHi8_!;2_Ivq)1=Ii|9v;*Sbx z#fEa%^0>04W!$ohrRz%jmwqg9DcJ{)qJG8CiVC44wJZ!QJWvpm|5v_q!LkBp;9-X4 z|C@h5UyvV`m!E6Qy^{L?TKWBKPR{%6vaCK?e`GmlCn8eZsLWTHVOi@lTW0J{uTGzm z5s@Lx2+z#P_R4oGaxT?Xtf=YRa9Z0;uQd9Y=h63RPi8N*hg-p$&kqzZ!n0yenOYHS zTVf|xzi>`)z2tt-vzPZUpBkT&zGL7G-r29gx2s>4pTd8u-&x<+h>v)|tAl49a4wzP z#=^EP}KLIR!mGGU- zQ_&;wJ4mf@@;M3$v5hAycOW|Ic%?uYr$|s#Df9}1B1m~jIn&k=F)?1LTrtneu=@lb z!7|Js(roq09L%2Vla6rw%Y6R59^2O96N*6a{mB|;~u9a=TE8z zc1bh63-YPaTxu>frh-djP&VWE~THD5L_H_yi`a+CRI^HlRfL^xk$ZfDk+ z9-5Y$d`&NnvoOy%XXp>wOVuCL_tJCq$+~lhulh*qqutV&)8Nn$Sl_g+d+o%UW!3wD z{r1qTuf*;)6=mfM%ZuUvlvO&fw5()!$*ba##ntdl6BOMl{G)JwVQ67&!KQ*S1%U-G 
z^IPPf#QgknZgB3EoDn%*In~+G*|)Nbv%_*ia^7T5%buQnDf`dtxUAgF)tSdK*JSR> z=$5`JZ9&@VG~cwwRIhYtR&wsd!kDJx)P&}-kf1h zGk2n;*p0N2(s4TQ-tor^ui4xX?Gn$Dw1QvMMA>VZH<0D0Y6V~NF+f>58Xt`zgs3hSdMEvR|_>13^f0FkP_fO7k?7{R75=q9`VVAgF%m=19 z_O@_LltX=0Jnw?B^WkOa~?`8VKlUJbfCSfD!YANn*NY+-#hRy*O$O$%a>k{f732H~KF6 zgSt1`T&!pp*59dHT05X-aP_RJ4Vv?n!pch(Gb%V0>&lhoPs`?()t1gEtuHxI(y=7E zxOcI!=xxz&MgBz}3y&49Dr{c(sGwhgq#!^4M1Gt6!n{v;iTJ;rcQY?8uP`qu??T>+ zyrFqPc@?<_bAxiv<@n_wdR2C5*0L-YjI0q^&9hQ6=VczvyqFc3^GB{ce`w*UlKBUH%6JNnqXh=;N$e(rImYy#}F^Z zyU@3_|4#o6{+0fp0tN^20_O(|4cHwpEI{Vp%=eV{Rj)$NWRHK{&2AZPiEgvqj<`N? zF*=7h2Vk`z#$k_rQ@d-n0m?)2JZZT&(WXGqhJORvSsmj7$_u48m|vT^VA&wUc*?jG zeoI-#KvSg28Gbs=OhKk?WB+4Rd!6)8eR`gL2>6W!j>tVm6McLptqTnt>N7%RkcyIMy!=(*gNWH ztQ6fwyyzCT4N4L>V6P%o-UfL3AlZ275@04u?62(XQllq3&W{tAllT0HjtAdMMIF`XJUgCS$Lahkz(WQrj?&KBS&fwNx|c zcp^?JRY)nYpTigG0ODnzhMYeIM8Gqg8wGsS4UUmB71m=ub|6z?RV9;q5pf|iIRiL1 zu|w^6W;=ZYv(a9_aHJSQ3@0(lzSMQY>}b4Jpgq~>)%Y8H(fZb3s}t7st6f&Jv-$+k zQ&orv*}q~%`TMfoWeKIDN;6A#maHz>QF5qcSILHwmDmmJr;=_ZvXb=Td&PT;hZJjy zHWq~xB^NF)lonnqSPY7EDoD$}k$)@yB|d-2AD%xke@Xs;{Or8tc|Rce=AFz{=RVKb znG=??Bio#nk<~ZbGp9YGR^KS_D860VvSN=Wp_*T3*Pv-Uq;oW^GG?2`)5Xjf&VJ0( zd<2QYy`l!OT>68onPQl&kKJtheGazj=T2>0db##>yXZd5Gu|uRd$&&;-%q|mzt_Hf zd}Dov_#6ZUCVRH={MW56wZpp4EUAwz(aarL!*vUuz1+gM-sM^@p zz}J1dthDj5-G`}x>2M94H@nlb_=hCYF~ zHeqzO`J(xN`Iz~d`J-7%FQWyp|M#bOoD|MI?ytN6L{uFskO?mf0}vB8Kol=p4vhCy zc(cuhMXDFP8=u2lqaFNBd&qmrhsnptCje*sMDC+lulS^JRnAl1R3<3nl(&_CC`SXm z^*}L6!Bd=)x01h-jR7v`0z9>X5%(oca#yk*k;-3+hrnX_vxqDD!^Ry^tEXZ2sfU7b zf(HI3%t4nTl4uln9Vdp0qV7R!sG@gb#|Q~V(`V-2&Ew3y%>&INu_D{q9AxI2-nZT3|A1T z-k{s9lj#;}KQsn6Zf+=pA04m$0X%pzYrEFos~K5ig3pdmb&_QdfrzTpngC5w<;lt) zD(fmvRcyq*5#uUcD>BNTmftAfj>zW@<%;rv@|NXI%YDn;%UvKZdF7&V|MK4D6U(QU z&n{nCzO(#9`9I~4${&=UE8kJRwft;(YPr0kQ^nGX`xQ(@i^?IDKf)9C1op3yYi4Nj zH0!H6R=d^M)jGnLT#h+)V`H-Rh;ESH)=&$4^L5imbCvl7eC&Lw7t|DvCzr#^;r)dj z>^=)NV6Tb$HZ9;UI#H~U+>#8C=1G^yOt42q%SDQwiusDoiX-p@f3CQX@AZnsil4As zL@PxY{5#qKjZ7($<@eQ z=$SUbHf6%=!u5y^J6bqY*biwac0=rkxP3lC4|s%oVt+&r9F2gkwFY}h{3}cnYK8VT zEp4V_=f=-AMjLlgXVDzd2~o00A?^lT(gkt0*h$hCn6i!7C1W>sgFGj>A$f?I$aAD< z$zw!Izl$^1B$p(ovBJMrG99`7Bz5A)7==fPJuw+O0OYh(^c0b~+-+i^=d}=KAqr|s zK>;vnZTSVfeY`%v4qoB@$o1hCaV~OZAhtj@bpmK74=S5E$1KHap9eCm46bsf0}=bK53syH)4yUbh@TNU`QVq8ad$AYAP(w3=(om(3)&HyC2hW(vn8mcyhw9z2-zW!> z`YUz$x>Q&&-@(7=9(J|60rbHY-36qJme0#Lb6@vR_XyYD>SB;H9x?tiblLELDACpH zxOk2|o))5St?!0?Oo!`#)GyZms^6*qOMg}WLZ6~9!OlfC1{Xt^p`D=@cIug8SO_n< z?S=z}GlrXn7lto}B7@POG6opi82iKbe}Qq0afk7U@gn&2k@1x=-k58w#;7U6Y~B-| zcA=)GrnZ=$^|p9UjzsOJW8G+_={M6J({XsjK7wyshN;qIHc8Dc<}h;?^C;{pv(`Uy8c#0~%l+WYqc~$UJ&%)lG?|IL74|sR5TkjQ==OlJ^J<8k9+kvQZ>v?N= zzwnmxe&)?VX@3MtYbk8aJ^X<2@#eV!(=3OFIK|U)E4exFVR;Iw zKgj(R*vk>zc3f}ZF-tk0Fn%9`@7y#_A5J*-FVj*Pn8h8Z)=^V{pY_4M&-u(N>_)te znGHXKaO`VbMaRQFdK45t3%Wylc)U8$d|GQRGiRFhFSK%G|n)w2qiR^VPr`$0rw}bEDSZX$Kd0Vh^^J(lZ5{+F7 zQ>c8Z5_&qtk#OwcY2(8Q26D0`H2n_1RCWbkxhK{ydU1N-S2s=>H4j*xw>w&V&$1c#xR4fpe(bRqH)_k41fcN}O9mD?62l3875k+_pX5+hnfZj{( z!Ez-&UhP|;bpw#ICU7M*abcEH)<`l3LcqD zsGq6%)Ld!~H48Dtrc=|9rh>vILF!JR##7_4FYRb*6y_}>so}t!vS}z~O+#RPBfrV{ z5y&$NxyPWL$KuZMc*bPvM?7^Fop(R}FZ)uilpXdW zP*n^JW%rzW*O$StC=;-26$p`#*?-)J0Q{aV4v|r%n`id zZ|vZE4%~SOw0Irwy}{hV`|o~D_m~Gr_nCW0cbVI``VVqlMd>c!uG6^t2+H^;vl~y{ z2AckrS;wrhlyf#pG3CEFs2kG>Jk%O{8%8maOavrB7*YtVWP$h(MhdcghvEvrJjm4& z^0y;6uRGHVF|7wdulvTcbI=xxQM;92>$U~&-+`I%9+bvfw_~W^aZ3%)GUrj^3;49A zb145g9G!z!dj|jH=qzeP5;-Pk+4JNM_FgtGInU-`pKzYJh}OG;R=I}OAuY(Z;XSk) zNe?h5dh|6tVV*M2(Z?jc1n0kH-eKd5DaE)`iGA*C zn0oAcuVaji8N0(1#^O_O|Fk3tc4AiGUxow)48NolT=ic2ezF;Z@l->mUUo=N*LA9pZLb|mF2Xvx3e@$I5u6Fs)QAb>Dhg@xt 
zw-wf1TR@+0iYJ8ONde#)FKAdUlo~Mw?ZG`t)K!E!@-4MwP&eJzTESf!W!Eqj;H*-# zWD(l4kSPF{<>9{o$HiY$39gq}QaM_?{OhmE|FlC5TB!l8rDgP%_9r!G+megAi!JR; zKIJHl3Z-&DE2v?+a6?HwEs3;+AIi?QNeF84-zgMpoz@hL73V-p^0(aKZK<&v>g(3ukd1|I2o)MF|9T50 z5}NwINsku)p4P0TAoM21+mxUH($?1Yb_eB=R`;U3(RO~ImjFc0^~WciBP=Nd?Hq=G z_B$LtSz$=*x3vdCzUC!&kiPK$+IK$aKOa1q^oS?kLf++S=@&K1K$0VRpG|h?DQumJ zgwR?4NrX2Eufly3iL{mVH=7qBn^7Y|Kcq%Zpdo8=!JFKWNDbLqy5fvGO7ShRr6K1D zt+0vZ6T&H^JgjUWQi1dgp&;H@8X~da^J+rD6|`BqZ{g zzpkSyTckITX|EvJUNFxriO4!~^c-^UDbBpW^*5HB?{E*1e4ilu;w*_g z?K5o>thT&M~pU}RQ%1IBBo??56^bXq>o}dk13(fssy0g-k z^_x&0p+!QGEKL$=NobSMoIgHU>LYT4&>h0&S}2duos|;EFVf#Eose=^lMBwWG(+-O z6Oqj9Q`m3znciRDK%^CWhJ0Gjvfn;mudwt_sE*|Jz>}?SC-R9%Ws<(pKg$7D$|G`v z$ZH;GkCoYET;f~q3#LDYZOFx!bdXYWH zUL#W7D$y$}{fn(S@kCb6RYT&jBOW`(k#~?(XL%cu_zjk0@~aW&$X#q6lA{(!?A>I< zw3dgwm%O6_(w9&d8Ck5QW5*b_MC?do%}efL@9d*A+V^$WD z(VvwqMBWhUWc$G?Pl!ZeWfUu0Soz>$p$yW4%=s$sSUKNGOs>zY=*x ziM32SPg{eEUaAnCb!5ap2?&`9%dqCKr*IU^u=_` zuM~^+NX`%)l1S=ge3EM^kf@0`A~}duO}Cs)vpgxwqH7Y3h?SwNTqH7)m5Nmsie#z3 z_G>*z4M}ZTUC?A1#|hVxUxZd!8nyDZ^@!CMxnKK(<$H2ul?1%68V0sh`%faH6wx58 z+6SxilCx}LuaT=nHWS%Gv=dhQB^rXvqMZ=Egis<&oh)AvU6thsGR_id$x2VFq$G0G zDrK!>F1cnMfr-2)lGu75;UFuAvAU_1K3ScJ)z3+ZtYu<y^H-L+9KOa?3hoeo}CR4T4w3_`&q&F z^8;3H61iuUoz}SmOT+A}f&Kmc%z-?ErFg4^W#t^p7eudSOF=YFmM4imNXkbzlF%BP zgVhk(-(>zm?qoTbeFn?DgzLW5gp`)(w5-gwN^I*qhm?{n3oH4pxvk|SC1mw}mdZ)_ z2z`@33baewnn)Nze}wkfeqd=(YUwW`lUU7|m6ojhR9UFeN|i)z66r~3f&BhP19IF; zp2q6@gxalr`~B!?m2QMjxTL%BNlIm%^)VP(NqN~nqRp_{I2lRUv6uWJXW6=tTCqH5 z<-~7%XRRlpGpk0#QX=6$Qd2^uY^}e|4!);VmPf_7+sfD0w~*44@{qDv%g>gZts%Mg zzkiml_^xhjJ*+h(ceAqw(q1HycYfmw>(f|i^xrK*@{-nM`H9mZPa^sMJJ+`>>@&&ypWM&(+qWlK<*v0P>{D27x6&^A{Y}3j z$KRv?%U7(0{otw-ehzt6CJOX!%;80l{wxQ5UN zkzH0fM&ujO69xDr(vTdpGLOt$*!c^&N`8^&kiKG{_&wdSJYuEw@5@Afv$RiUqNGNo zy~rQie(bSTR+4L^eMw^5jJ<9>&#uUP-y_y{k>|0r{5|cGGc0A1Qm}Ms{UoI&ZS<{u zSo&paY2|5hj>veHs!5&M``9vA>+-GT$uDx&nymFCwfS$VW+{rDpIcXX$lat&cfKz( zTMw3N*z$k-ZRIKU+1A!!>5kOx`+EteSxdo|iF{g1#g<8LX%8}2B4zz{l|4g7C~G1u zWc|&4lQZNVLbL2@p!F@*dXs!?8LfAa-n8ZnICc^0HO3f9q%Ri`+?i+M3I%OS0M!d*$26$sSujt#ZRUhO?tM zkt`&U(aL&;)kWEH&pQ6GD=B0R)S8cdCwoR{DHp4o3ct1uE4A3%Y+h1=|C?CeW^2Y? 
zB`wNI19k*s>6_#riIo8CZ+1Lm+v@wV&3cCI3G&HG8*-k#|JxPo)$h-cUSt2hKl}gx z`~N$ywa3`{kT+ZFz-)by%ks~EUt!O& zPx;?xv+ck>!CF=;2U?$E{btXTQj%wp79+n|DPZjjl862K{%LH!Z(|lKmq|XB*1o4q zQWkQ|{;_mm-Z+wA zR^0)1(!Q|OZil^i6-R(QhkxS+aZhu8;qVdV)sBCXzaDtRJKRs$UzNvsN-ac`p-He1 zx24`PM?q-^s54X%@HbcKZg``LTFAuFiS#V2#uYNZz#4DQI57qER(dM^j9$hFuoiF^ zHhv{+_KvXDHwP}@XIL&f!P@v2ES)o9c{bj82Fa@xXl69E=D%7tFr`J*1+s1BEHuJdAOm_A^uHeej$~qs`27 z${XkvCr$!T1@+WAP6B5aCj)z;b_H7F0PNs2teA6Pl@&2Tu&thf4Y`IsOkbmwur1Gl zHMR!$iskfTd(1N*(r`DxbAe&e_S>qsH6khX^BNe@eo7T|#a$kQKryMPmBfX06Yl3*3J8U1t#xQ#eq zI4TiOD;T(sk%;0m710LQqc=}-?gEjM#>wFnbLuz-?0`_lDL_i+JVQ*veZY5L=A7m1 z8oW=NGh0i&Nh`o}t4Ur6YayD^(MOuq| z(}DI`09PzyYnqm5H2Lzys|94t)`43}+B$Aflsp;DmA9fesP_ zhvJI8k=x)*7a)cPaQdMJ1iR7==bCb&fLtLcmX^TO*#S8$#x7XlK)Cb(jyQr7#AyP2 z%OuVkl=K`*dJ;ISop{eqL@_#m+U?~0hB|G=(P12IMQOKicHrCs&JTD;TihK0ifd!M;)jjbkq{xhfk6*zkb9a=rGblJ1 zZztArf6SG2p!!@;=v&y@Uclyd7ubYTNEboTk1e*j+u)AhfKi+eq{wifD_Q_+5ekk{ z0l}oS#DNV1(y15tdm6ZE29PE{0#C9GsFCv)WXKh8+7ae3a3A}beMpDk-+Y0&iFd`~ z&GnGKWL1oeI6RA1%te_iV13KLDs&v|eQ$t>xB$tz8)%WAfa_=h42T!dQt&yZO+Y#} z(pf-G#$g|bXyCb40wXq^o=lH|7f^r1)ae9-URSz1K3gD~W=CKr`y-8@=hB<$KP~A9 zeF2jHEacW+q&@UDdJVl4X&vOs-}HI<3}nw~`Ud@!enCH>AJZT3zH(Ya*FvSSLrHwl z7M_TO>xprK%hUoJorpZ|5o0MEXv}(=i}PN(RHYu0*tR zq5+Z~X4mq#IIf3XH3O*-_AIjIN>)!V2d3z z0(cJ=^$`UONEZt)4h6UM0cS=+mWG4cTY^V>fCmRsgTPTkzT&w?0QEH-sIG~SvUBi1 z5g4x_$kQLPwh#XM;@3drABdyw;M8vT><;c}2TqT~y8|JoT~Q`EYDB1))Xs$RH=x#h zl-CJYy}<8LkkP%s`vgfg)bhLmc;*n`&L)9d7lNmkS<)(M17!79$bt1pn}H#-V#3xS z*JPBHl%3S9BWlzf|E*EuHjobOai%lmLlQHO?o&4wMb+Pbn;; zq{oP@hu~(2MY765AKBpQV#^s8iKIij5CoESc5KyQS#6oD+i}0@o1_)3Qh-OXa{~(B zM8hO&N(?BAtooC-W`D6~zpcEnt3E`3CM$sojFiN3V-MN=`r>NXdKPp7#HtdoOSy!7>nw7r~B@k%mPv5ZscV1&hGq zXIMN}1nLNQsLF&~B}kYrNbyJskhfO!UMZer#n4sZ&Jv3>C3j|nhDXL>K0^g==iRf-}?Q4vAKiim;*D;8{^V(-18 z2q@S^P(Y=F(u;r~y>|!+B_TbTNl&JIXJx(nyTkkbo`3F>nLD?fd+s@Vuf6s@``&}) zv|0*O$t)XLET>f+P{Xro4Y`8MG(gQxsk1Ojd_>h}w<>>|!uG$F-Xn+euU4BeN7)O> zQe3VuVU0q9&FZ?lTJvuXsxvW0tX|2k{$W8dW_;{FmIT%W`}0(NPPNtvc%P?^$B_qA z8QaudY*u%)L0$E4^|?uHL4M;NRiC5kjI*lNC8`~PHVsPqC4vH9o{_)VqmOI6KNPIFE5Z6GyDVDH8QpN`{E&(i-IEPX7Sm-}jgxx^i7_V2<+SX`q zN-HuBPq(%$G7qs7wMqeUAD}0Q4ItM~r^-ftACO+MNtsiHY(C`kwNr~^;z@R7dTRy? 
z>;f{6Fw!sM?J~BC5maUfASR0l33E3&>VzxGg7GHgauP-0DPo+~)oMXBi^up6GhZc> zk7K*cTR|Qu>@YHHkX40TsWz?XC76o45p-qFXD5Yq$p7l3mJ5_c@6l@2nSsc&aOYb7 zAu}DbRUQJfzL=kYyaeERg8D>6B}?sPZiD11Fh3Ak56D}?F)TH^wSH`0AZ?CnY38ls z7ZzmSBJ+!Ai@Rbrn&ccJ4=q{s0W)&g4_ej&+6-9B$hHpVSup^x0gj|#bOXGU%n{i# zkSQ6zuTg8zGG-`(n;`punKO_fN;wf5S%KiXly77MBA1482g$=xMXg8-ZUY&mbQUOm z1DYat3+E$)3&={HkgNw>8MUIkQ~N3T%;?hcc_`hm8CZZ8=huaKh^#4?xpqG83mG_) z`$iN(?vci-8Kq+c4=so&n>MBcv|(Hi8Y-h!+=e(GSrWd%ahcoA3<{t!QHyyPj2Hsf zz~Cb4D%v0Mj{9&NkVtwKM`;mW#wc+lI3E9?hg?U*n4_4C9WzSG{B`7*!ACUh*e3ZJ7+*)O2CpvJN0Ot$Ts_GKL_QvK+ld5VQOJD)rv^G8 zFAB)Pb!1C1Kb850#0SksL5>uV(xd7QJjPw)et^HsbSD~<_xoD!8LiGtL*^C2`($P= z#_8yT;LaSXJos_C)^eSymmneHugd5We#4W1B{R0RyR}u$&|~moXpc;-QYxd0@OQ8U zdnpf)M~+LwFM=n`5o<^hdLQ)w%7xS~N-?%;qbk~(Dn^xzGbn+Yw$Xc{#!wSQ8zE=d zf@67Q97W#>8W~pl3`v(UY{)wFp3h2%c7TlmeSu`~^Qv2MFQB&8DjWhfAzKQ2#m4jb z6tZG@$^v`{TY$!7d1PzM&IJ<6^A^mDl`Kb;C7c_G3#?@ML{DHL;t@b^a$Vu%awXIY z{08M{&ol5D5a25Fe(@Vek$*L+`^0&8=Al;Ykvv>l8s>}vHGn2WNwlKi3NRCxf!{!1 z;2&`nypP-u`_Qi1IUF;=Z*0X;M5%i9i=_dRST>}HWB%HxKG9Po49~pa%GfIFz%a-W zexv`z&xZ8D`zE7{Q_r2D4q)ISjkGBAohel)0h$5qLWz9xh|&)%XB>bCgCl_dWQouN z{0SNYF^#QKsg;PDiABIQ@Jx%UOlv)9IjC2t1tI~=DQGRW^-M+wsLQB{M5TB?_=Gi~ z^+4@n>&7RSh{8m3WUm1=a6A|&GMm98fj-DQW}Y&$lqFM|PoIG20o#ajd~yT%@MN%j zc8ACbriNqB&pp+$NB_G7ut-5oWL5($B|{oq7o1bFqOl!ymfG@I9ED@>EC?`N7UU!H znGm8b?p?DxKw*q_#^&?km)Hs!3B@*859ay-7ZGK_HrO9#Zz1+V+XRH>UXIE#OF}Fa zk46u0w3UpVHVx&=)tU1usE_`FO~Y~8m1qMQnPO>jMUBIAuSfuDE(>IrffL63r3@QpIXV<3Ku2f^dO+Hs_| z;A&c#D4VR1N6_y_3>Y|%E6`6R)709ZXb)s%J{NG8JxAXh@V$9f_1=lae8&X)?C(eCnM<3{Wa?9=bqn!~Nq(W-|)YmgB&LXc@F@A?gwRNKGf9 zCA7d`JFuUKxWKx?{-S@7zo7LmWQe(a{1l%-(<*8k`V=-Qb`cAOUZs{|P1e!7s5f*Q zN74e~IQ+rB{qG-2rPG}ns$HWDUdeYk@S+5MTq{RZ$`WnQx!I1*=5 zZYk?%Eh$^}#{Q;75NoJu9kh&AklG9+pxqWLPH%|_!ELgI_8WWnWR%!Z=s4~}%abA^ zu}9I5{L|K;SLBrJXGoj;Q97uR)LoX#dm!rI9*8x(pYt&X_7@AV zy*zMAU?WBlaV26uY6p$r^{JJ(50s3nuuhOp{P8Tb0Dg&%vxU(nOp^1 zxEFg+ir^9Y4w5Gl3i+23wHmX&QU>>$iPIJjsw zkQisN7eqIyiE{V2BKiwxfo~iuJtX~xQqfy{dYK3UpH&cpTmslYoFG#`jk&M}C|B5( z^pvzd+6#S6>nD07x*3ZJ&~~;h?UJS^nyjMra5o~!>|q>FMxaH67}F7b<<$iT7$f1c zmhxl~xIdyb^a~^!%_lnHL&QdbtEI6%!F;g@? 
zOQeqa2>BDu#4)H5dj)IMIBG3_v7h7#J&i{Qlc4uOBoNJlUP4o9K= zJ%#=yF31(oZ{#c}oBI);mVU&25Z!={&{VM|)Jrg2a#gO8QHvFY=D_9C zIex>Z==nTowi*w?s0~IwvQ?BbM}6Z7d}s{bO2P;MG8CAj0DXjyP5Xx$kvRzZ3PYBt zCHc3e4j4Fq*sC3Y|YwMZ;3t>GznTozv{nz zGkA22#L++G)9rXlSc|ub_R*h&Mru#vV-07*lf*ire=@!z<1{iZh|wF4;Bfo}SAajt zr{)!Aq zg7otJABg=xHsLdJA$GG=BDk#Mdcxqy$WS-YDaP5EO#w;d7$>d*=|r@PQ7>E%+ZfH~ z)kGgTUV?knd}z^e#8V}D39covevWxS;&E4eUYjv9#@hbdx}~e5pw+AqYNEXlAvS_W zZNgsIOnE~J<49t=SWnVbYl|8{H z_xXkuL=+EC9J-KYM_NTZ;* zFfPqzV~w0rWP`RJG6{skRw5f>eKL|IkxD@@e6wGu6NqI1Eg_|lX0AcDxzH05>7=hf z4I}H)yfxH@*C+NvwxK1|6lefS!)8dqT~gx8R~0hBId zSsc6KT_-Cm0b79>aBRdF+KCZ9=Cnb>wWuVu(WXiS_d==2mjovho=3YyU8BFpJCv5w zw4QcBECVoy{Oo+TAQ3Aqf1@Q zr${4t1a%M`12PPh)tDT$SbPt1QfBb*&X_XPs;IO6=e$u$K*AlHx@DNDX-DS6+- zGC??Lhx6~Bd&Nq#g|Lk>L;oKBCCAptmhlUGncqBDI0*R%YAL=d<(zFumW>)lOh|y}uGd)z#fACEhKrD8n46}xefhU#GM)I;21L2<;o~s zu!={^9(u!4FSfnr@oC;9yeI0Cmf1oculZ)wF!A$PCay}aO`@bgW8pJo&tyHQk$7Ah zQx*#V-HnZuKt@Gs83U)h(-+l{hg*qX!UfSjl1B*E0!G4|FVp~6LObveXAp35;@L{sE#gYX2gAU}^DA;)n;6?H%pIW~mS6{Loz$p11CoOk^BX?qTfyA0@2m+&j zvCii?SvDhcltr{VqIZ-Dkv5#c_p>qE1J{-}RN)O%$Oy(ertpn7QX!j_dAG=8&W|F0 znAQTB%<%O1u2YWR(T9^hkL6xkyo{|SUcW?~=|dBPiKOIGf`&LAa*5VuTm@^f?Ta>s zZu1)wE3%4nWyC8s9w_XRxF;-*I48Aa8PHp4O}0DiuC}n<`J^q`5q({{em`887q*_a}F!u?3E*iiM`n8~Z6f0>7!D z!lhX@y>+pwlDWc&phyUi2#fpy*U;P20*HX&SaMd;Y)$sqIy?uxDYDKCNbm`?lz1~> z8<24Hn)o*2ZQ+|%mrRlfC(&D4X8ELlz%#YcBYH%<0$30II<%;?0hxu=k(ecRNi+&l z$iC*C^Jr)h84A`IXP}-)%kf*Zg=N5|VxipWEUACqeHVQ87lw7;EA$ z$)aF%TgpaeKBNO}g!W|AlafI%5&g`_rbJ9=&4evUgrAlSu@b(~QQj|yUT1#`P>vLVGB=>U^$<`A4*OB2gMD3r^CS^B31OZ6>qO>H_Hp3x(aM zk3{{8)j&$5SP#iY-3B(Hjq_l>!>43lvog%8f1t4C^;wM*W#Ogu*6hltcw;6 zs6ku+I&z_G!}5?x3dRZp)KG#Wh4lHrdVp={2@M+=ZD34G>X)mv@cp7NF$>=;#oo$H64m=4e{+c0&~9h#j%pfzP`+R;+np>2_LO}IJU|`!jb0+(p|35t3@IYkQ>q|2=qFiXxdO3Y ztT4YdOMt5~7A+;6 zW4)xmX|+%?uZnN5XWR$u7g~<>qFoYyi_r+lVuk*Z$3W`&L?iVBF>QD!TqNowD35Qx zpBlci_{xYtG72egF69U@vQ;@EhIqJ01TP31 z7-wf3L82W*bBSoMY?Q^TYg{Kb;tel)p*nfVXa;wxp z_$aQ6h%QGA_-qEziC!cyhc@Z_JBjs#MdQJdM8ieO1XrY<)K~I8@n&#m%ogB1;ux_| zJdgK=b!4ZIeL|ig-TW5m7f%E6PeeP(L1ISQMLi{M3ElzwMc%2c3o-#sa1}vUWV(~B zFainw#F&ueUeznCfnCIN^{{lvbt4*Klo#dUTC|*!+ZB5?pcv{++@j4CuM8d=StMf$ ztUcf``0LETaLF$5p{`xzOJOyO1s9{qO>@4~w?O zU*|{QfWnNI#Byl*l=3taaE~Lt%$|^FAJI4#iM3}-Wi|{xXDtZHC$E9Lz+A)#<#|-B z6Ss-oypqPpv`hzKDbn`zXQUVZyY;30g;&XF9KB%i5Hx?ArJ{$>N5o{>Iy|K(5n6dh z1~Uz5Pa^|uW1nS4G!_fyA(A=&{JAzXr|_$LQ>I6vM_Ff`zKqZKcZzPikFCQvR+{j z!ZBmdgGMs8q3I2@laUF;6tSRB(em_l=xYw7L(nh9c7`1M??kE#I_vJP5yX<)~E1dh->GM(J->a=g!I1@F|Q4 z>-3Z8N6Gj#v!i4L9PejDJ=mA10eVB?a+2u^TgDg&nFZw{*5wZpxBD0X_tb7x#6^ST7cP?>>=VF;}0^+ z5E%ve1GxaRylC;oA7Y%CHkvF$bYJt4m}g1LuGxC5F}FkB%>tePABwXM^34|rs&l=ExI zdP$xykV&!@QA-)~l6ylRxDrodG!z$amCKvap%;?7ui@rVQwnpFD%dm`%Y!DtZEWyF`y zwiB7y`ovi98A0RodkV-*-w&uwWX6%A7lP$Pa5)0KD!hiAO|xmj0La~dnfwz>#V=Yn zU?fnR?2y~EmS8zVR-!Anusp3?maZL5OePP&v2tB}(@WE^nQOd0?JWC5q)OzHk|tId zy(FV1;1tZ+B05385IqH>K#nF^gM7xa2j=q0mjC_rzj0Oi2O87R5JF-kw6N#*6?&5q z6^X@5bOIX8u^u1>@RZm|ER&HC8Sw=|5O0{50Ayf9M7t{1WV{bo6?vgtX?UQ?1bRT) zpB5NajkXXv$FWUj&x@zZ7$M(w#JC-!fUrUZYGFPIB6(bgRT2v&@j^xqWxg_IyKo=3 z$r;Q<$D~0!Pv&5h^y#P#_s5~v4;h7mN@a^20fvgc$Jv^XtI33Tk7Ql= zWG)kBmiUhPP+p1fBD3`PaM!WEAMTagpnEtLZGvq)n`o`A^BOn~MDUb^iNQvYt;Mo0ay@Keq!ck% zEiaMY6m^ER6BhQr@=6_Htx-$-iBC>l)AR*L$z2Fbpwx*J335@}V?VXNjcs-Io3uar z4*elVU~H6&R+3nGe#>Y`>~XS(_L*hj3M^ZC8`lyX609c66V2817CK6v#ckRr?F(^% z=ZQYkUO=OHB}x?Pz#7T5*?!b!IXCutQa1Vt5~rb{NS1tauO^N3oJCf(qbS2#gkScY zFB|{e*IJ!orC0Mu;ITk@wWz7Y_8DJe98NrRM(X+8qP+2!Z=WPrAwS_9)bPPMCi#Co z?Z_t_S-N=n%mO9Lf>(iBl6#0w(57f{QcbFf!P0x|5&0F1&+JbvOGK~*wpqMl`r1H3 z%}Xc8l)N+Xw1K94DwUaOd@BX*xm}%&+)X^+VpW_L3>j}V#1(Nh?J6Ag(lQ@e4-IAM 
zmusV`&>Co#rei2o?gVSt13l6fvJ2)p5Jh7@<#@3z)Hbn5&>>10Z7n2^NC{lwHuQw( z7q(~;4@su%#>%ExwpiVfZJNCkZ9Csi2;SgZvmX3~{*`T5`(G?VyVG)^$F*^9$vA=S zi{($015FZWAHlC=5nIVvh4c$?jJ1-wX-LJk0ycmxY5FMf0Y*AyL`hm`CGzqMzL zCD$T0)+eKJ66ND~;`z2y?i8L2>megOS}nCLf+oCUt%Zqqu)0JO$&e7)j>Tg^VPUed zQ33KmdhdL$hZ-P=OACNs#37M%*c;hKNhhP$EDr1%bq~lziPq#uY&JEXIKoj3GPT%G zkzFmS#=Ron!V@9$9C?s&4tfU6wA4HX@kO+)l9G^uQEKgh0uv@D) z)=;wWI9N-xg*I9WtH~v{PuYJ&2CU;Pi{O_yhVL8|uLs-7B1F1j%eCkNG=i}=Z4L6z zU*aX6AE(1U=o!3zOyE#2=xJ#11KwXUA$IwrIa& z>%wFCsa+3DLaPhQJs$~h1j-~9Xra{4G@U7Rog}l zrgjq>@Xk2+H?Z$crN_`2b1NRveq#iWu_St7#0#v`_hBg<*OP5Hhfzzg9BqwJD(ofu z0bUWCgzNK6Z8Vnbh@6O91>@vgoGEBXo*}isFF`&cC3y|eQR;|uxmTX$!=Bh0#$NIK ztFrX-(U!aiZ71G_hMM#(vHg6UB=!l)p$7!z8Jk!{o_aj^Rze`0pA7rKZu-h3!@S63gAaDULY|Ej=bPg@&x%NCG6*W(-CFB zevElA>%EAlB3FZ-f}A5hBPSV2^jNSBTcn1pxx`Sk`-r_iuE&n{ z;_A2p&O=Ff6ENEX$6`%>;al_R=n-+OhHXo%6wDzepbuhM8l#%AJ$gPmXfhCs4VuJ2 zPBgnI)|RLMnUl;2>O3T0>_4?#iyIR;L}CRuG>4|7P{Dt#93wjy!JvidY zaZHX~@+~FIqT+UZGIt8!GAb(H*b}=hW23R(e1=}$K*($@yekTIptl6?h?&vwon(o9 z*Aqu$INpgIGc1fS08s?XfCFp=%^%WGfP9dx8XNzicOzVd`c7?UKVsaR3ws>v+8s4<%uh_Cs7vo%JKzw895?`(8J-Kp*`_zU2GnE zY#Sqk1LLP?afx-%FQF7+wnRuBWsXchn1QC_uu0rU%SFV%Ia=J2ET0@5Wy21EnGk(w zy=4AH?1Mb==|QmT|K>Zf{29`Ux^o^0GGb)G5gR4KLn)${D=ZsPA4cl%ggaY;o}0`; zalWpwY_t|+3BNct!zd`c!B{U^q!{1AtmQ}k1wA*piWCfgYQU! zjEg6UcE|iLY+D=+3@1MTHqg5i_JHv^v_GQQ(tq^M;9G$QqBLL&%MtXK8I4#&da$@E z=hQ${9B&$C>!5}Bz7x#bfgZsg@jEb+2j5GNw&SxEElO4~#~ii*C(!o|t+o=|uoZ2E z{-x~0iU!qu*LKzaHOj))sb4%3t%5OYnX?mZkCwsSkm_x=7W2f|SEwsnL$A)nF?fF& zEt6gCF{-P8=OM~uS19gjwSOLk`blal+6TTd+QQuGb%V{}t%GE)V6_;l$7lhb_Oq(r zc;6=Db6#~mdjpYex7w4WKGApZ?=b_3PHo5h6u@vCgIPzA9g6FrK9~^_cZ@s3`<*ds z5UztZqn`7c@v6kI!XBbUi<(Cs;~8Y{5w#thD5T~bcdJtHE(g4uqERsz%rx#**Fft- zCZHoxHOH(`Z6WfZr7h|_uew4+m0zx&&ZtyZcc^=`sd-MpfjDCxv<*+QMKXxB6$BC z?i1~aUWLWw*b5>gocRNLG=|SNF%iX3I^KIpeE^ODy`U%9hC69&ZBd{&a@FxBOtgKS zYJp;vyOXDKcdFHPNF=O?Up>Ww_sX18TQRpHN8mW3242fqg)sXx=B+%_`ulY28))*? 
z^MZIv5bvhJ8)-1p;t@4tQ!rD-qoa5$xc#Fv*5i6xvg`JV@~2O zY7W4DYIfJ_)QrQ|sO35}+bU)PzD3QFI#SI{`=DAzsaaK@QZuhUsb-RWOnpA3{-0Dc z48PVo^XhBr`>nXQ)%>f^sReVhVlLK~)$*#Eqx=mu_xT%Y{W&!^E6#hYb)MKq)Qqf{ z^_H{U-l66w=B&fLTj!s}{L6jR%&c8hZ4*@e4N8U}YX;Td9@U4>tD4*U zq?+B^q^>+l%|^ab&3(OG&1wE-^c6KT`46h*4t4ME#7&NypyqM@Sp8$B_N{TXafxas z`m1%f>4vM>>u=GG(!Hf-n_sCb)wR>#q#vdqsPC=sqrXgliTb=-KS{qrzfQkWzfE7N z59+J+r}Tb(s-d@`kD^Jq`U00}VGB9yYvec+v2b;c4~xnBfXT zvcaeJ<{IpVdVN@*Xh<+r=-2A!=@;vN*MF>kQvaxaw0?*_U0<%-r(3K0S@(|aY26dL zd(>R_K3!PV^@N&%{wXz2Z%@Uq3!|9({R=hs`;cgds3DrKTH_PdYEMS)Q?2HS6omf_ ze-OSs+$n4ihe9=>GohWKze3YOAA}wXT@`8{@`ZGvvf$p}>fn;#+~5zvw}X!a?+gwP z-Wco~>>lhByg4``cuVk_;Gp1@!EV8XpgowXwqG8+KKMZJ-QZ8bIl+a&Rlz;Ml3+{F z7Rm};85$LOJM?{MNoaehDC7wD4?h>45#AOKgi|AZRbRfKW^q3iNr~PNeM`+#>yIX= zr?L90nUfdAmB(eMmYuYX*%@_N<27DETa?S=`4jfM(Cf^m@X72|Kl9maB_ z!_>`mi|HlP&!&~8^`=dx6Q-ys*_>j|F!whFzR`ln1|{JaM*axjAAT@ADx4Kw6Z$%|BD5(qHgr1pj%wTDmd-7YG@F{6nvOMn z+H_sh;HKM}Qky0<>Kdmt3~4x0e@*@Ny76`Q)!FK%*7mBct65TWbIsAfYk`XbNByt* z`}i~bE`PY%;=kJei@(nA3LN*3@~^CRRlio1QstVIk|Yh2Pat?7!UQBCcePc&z@JkWAg@Q=`*aQ8@hWR-1cUKC$ezxU9Xc4_be*Hd*hqZL!^JPjnn|jCWq-+U^?S zp5b2aUgut=J~z1MxIcG)<8E~S=o#ug>Af+2UcA|Nq0i-89{*T;|M-saVefA5zuvI- z+W1lNH^pBZ-z&aTe3AEE?_lq6?}y&0-q*Z$d9U_*y^}l#++E#ATw7h+Tw7eHU0vMI zyWe$R<38O0eU844M2Feob@&_&_FeWr?bGe!?3wnzZO_^|*tS?l zTBDYKER!uaSZd4*&Ew6Z%pUV}(?Df9mnrG`%W$z_jneY*x}ypYr$j3vS4Czk-T5I^J-7lTwOCKP_24mc6Cbi^;P{VZ>adGJih$7vi+r_ zl-atuB&m3A(bPg)VZqtp*>4LgiY6D&FIipEP<-gzBSl}F-G92<>FhJz3P+!Ns^rcx zUB%i8N5%bRr%IkGDJwZp8Y-JxvAt@Je@0+k;IqK(fs}wd@OR*z+EMjg8^3BQYwjOR z4|NHx3Qkm-cQEvH_*i&qd`hbl>Z)bG_+GcNIBHojI;ITm`O z_Zn9}*XPcij$Fqa`*6F-zRLEo?G@W=s$FllU25yBS~uBNZCzn~&Dzs?%<`t?VvE62 zZ4Q{ds;$RcRw;=&X{olDt=ZPgl-zt}-EM8L+H4`~Ve5SBkJjbZi1jI3y={uUpJSaP z>Zo$eatv|U9hEAiI@qz;(boB_^K<9(&WoHU9p5|Na!hcnaFjY6PNQ?9<7USe`$)Uj ze%SVnZMZGnmZ$1)+H!?uhk3U7NAqLm8qXaytUyup$>_Go z{_s1YlwetNcJoh44=NfLHI8dEG+y42Rlm3P{hGT2Px=>C+p0%Y&8YNMPOtD(Tu^Q; zt0*Zc-gfTAqTYqyoy|Ktv+&^}f6>@;zn&{7T6uQssVnnW<=OI@P88=|UU2`Jio)We ziAC!RkDPt&%m=5o<-eMLw7`G*_p^D0`g2{2Hy4j7{^nfHxtGq}Tl{WGP3i9P{K_fS zpZQPw%l#Yu!~OqOe_P$d|6bt6+DGb-HjZieAruIYRrd16h&%F6ctN;Z_$N1F3@XT0liSDx!-cZ0jVr-$byPnvg=w{QHc__4mp z3Ew8RNxC$tYmzl-LgMsjDWrnGj@i#-UVX<+s zX^J`B(#G<=dA@0q@fE`t`u(~-y1x{+s*8?~9*gV?PYn$VrnJ;Gd78d#e4+8##{P}F z8!m5nvwl?F)|xVZm+IA(uT*p_FDR`q`Lg8YlG{s?OO6(QTzv4{)S~LMz0b@!b$`KF zHN|*F{)hP|^UL!WDfs*39eH1!c<)5P ziC^ zZm3! 
zI6rh=fPhS+-8y4W&p23wW&taZ0_in1mLEFV~| zvFI$z&99n=nme1TOh1^eH8mJ#8wVQ?8y+$=>L=^l>*wqG>h>sh+!(ztIwNu<>W@aUxz#;{N@KYfij-;^DkkP7cqH6O-M2% zzmsf9nVgc6Iw7?>b#U7Jw0^38pH;ouH*-Sf{>>G!4`Onov{pZZ58Ne5_c!`O8C<^)aUk<#&3)t7w`8z;`Mtz_oR4!a9{22rmS*<>p9mR=Z(%v$5cl* z$9DUd_BZVJ*c0q4ZBMCQF0f9t4z+f++N>unzgix(BwMDN?dEq)-AxhWX5*{In~nDy zUodtzzHQjAe_cORU!v=+o1#3F_Hk>XfrvNqWY`tn5*ib-ht>q&4CVyiRQ)`;>Bq)H z4Oca6ufMbYSl!)qJ8PY_hMHskg6hkv|EapEYH#Ipm2E4(t5{I}ZrP~PS4)-^`_FYh z_e0T*MX5#m3&$1e3ST+9^vt5uznm&6_+Bjw3icMnpSt_h$Wz-2*5=PSX*;Pu`N+xI zlON{$3Z@lwKegsmztgXse(<#Y^uAMu)8C)I`AprJHD^y0UU05g@$BNiii?WBDw$WB zS5{VDR1vJar21a}fIxBJo0|7)r_@cVzrUep<26k`HCuwiLhpuGMrxu>ar1OnD8J!= zVXpCRQ=&P?^15}8?WFy0$FI&WTtnSE+}%AZJa2hN#&`0a^Zk-=UEtHaP9OG;8{Z^ug(VrM=Imy_EJv+NQJ)>66m`P5(3f_4I4g-RT?B z9!#^OtxA15H8(YoaysQi%BqxSQd}vslY1r4Ny<&ykvKPTa^j%G)d?3RZ1O$qtBijl zzEUx;6z_b`9jcd)yFYNJx&LxK2yEET^V;Q z`cgD2dLS|>a+~s5)`af}7l&RA8AERe{Vh+ogqy!;PHFzFX?Rm|)Aq*O8#gxeYxt*r zaQ*bUyxMlPpVxS6W(2MeSOS~<_xQJ0_o-f1)uHO%%10|cF4vXsFWX%9YuN*3hO)0q z<4a#IIa~Z}ac*((xtZr~Ja?+-{i2aYPZxbvG_`0<(IrK}!nuXl7di?rESz5GC>l_7 zXVJSwtBXz+l@%qN`|R9+;;Q0DO6HUlmvk?Ewe(QwQ)Rc6KV9*9<&Y|W)rZxY{yF|j z0!IQ%Yqr;(t=m^WUiEl&atHSRMGGo3U2 zVgA(eq;;Sz#a>~b=eXIK@BF|u&>eLz@Z9P>>m45dWBif$?!MW+4hfSIDiVeyE=x>I zdNAp?q`IVwm3>Z0u1fMJWh4(tel2;08urOeX-@t+`O@S-((+Cxh|6+WNcY^m;Z>D#f=YCI}dy>1Yd$#L3SEei8 z)$CmBe8^enc+pXAf68vNZ?wHSpBp0K5M+S!1Axa_A}R}1z`Vmq=Oojc|eTAa?{m3qG!V(0haL2WJI84SIsZT0U+*)^tacy(!d~+SIRM{QtZh@fx@L5Zx#kDuAuaYts(V#0s+wC>qJ|guRgJG2 zShcmXslrz=zWlTDKg&NUx0FvWn^AVKY=7DGvOmgl%YQ6?ru>bv`%3qhB$w_feY-4J z_Hg;(@*Wi(Di)Pzm+vmyS>`Xhro3InUzP7vFZSpA-|~;}&-6F=kN7`V|Mr?cYL!FZ z5K$=yPWdHpdp#qu!6a%f0&e4)G3U zb;ri%_`dah?R(kxhtHf~N_g3~D}G}9s`#_<2jh3dC;F!NPWe{)hWQ%eSH?dR?~UK& z{mJ`=>g)dAd%e%79)H?%*gai&YHzu7-TPh3T}xeGxGr?9b&hoEol_ld#{_#HyUQN3 zHQ97_tG(Fvfh}MiWo@=BvOH_K$PzUdn)jKPm?xRvGrwcL+gxP2UHQ*op-Kra@U#9P@cj{`RU8D0Js&HI|)ZtmE8 zylGC;yrz<-?#=C*r!>u1o>_f;r}{_h((9(wy;+~yU~iaS-@ATQU43nG?RPcb)qG!* zQ*&qFjQ`?*t!8~qWz9D=bpco4l){W2Rcotq{G$Wk1;+aWRmGLoDr>dF|D6A3zujM2 zy`lP^>g`nzRz0p{Db@dC;JcdInn5)Ie@XS}>I43?nxxtvYPZ&|t^KKXMeXRiAL>sx zEN(p4IJdD+J6KWcdAwl8>7avGJweYe-Zt?!D=FFSebJlcUFNyf)64UMXR~LA;^g0Y{`KruJgtu> z*OTwgcRlNz?r=Iiu6JA}*J$Sp&NAm-=Pt)=dw=_*_Ur76Y@=*twjK6#$Itd1wsLEg zLiU@?H=1Xejg}G0makHuKbVu0HBT_VXBuX_-|(qnfpL{-zsYAxH1;-3)Q`~5*5??y z8aC^v>YMaW7%nsn)jy^?tm2c2D$@RX++(^f`m6P`bU(*Uh^|)=y(b}IzvU2FN+)uT^*_mjtVYr8Qrp{Wp(hA&|9J5!O6|HG>vR}tNFW@ zKU!ur4{7S(_*}!@`rqq^H@w=|yD6pV(Z&xOo~zHU)77r4d7}2Ax|+KEbxCz`wYHiy zfy-(_H3Mq5*0in34-^K*1nmBos;pHHSAQEQsu@!Aw10ioB~@Qnb*TQRx}S0yAD}SlHs`__-YG6R1Ch&7jLEwk#e=0{-bq|>9lIlOJ+g|%&?an+Ev*V)?J{;@8#US(@iIn>{l%^WJjL7WaFufODd&)_tLOvG;RthIhOgcv<59 z)4f^g{~hiQZoT^}*HY&h#nc~gUg28cn(lhk)zLNIS?1{JSZPnPAGUsD*eQ&BZ*IKW)53_$`Yqb7p9cL}GTw;0B{EF!@;}pXfgUxt@>9o10 zb%eFSTxfjUa9Uwr8-1y6hyF3+9j0H6hx7w=v*R|$_0?tRKQw%$qHfc4uIQza>!OR} zJh~rMlslxB*P@2-=wLzcM0jd+UvyLCi*ROmLU>r@%gEO7#i5NY!&@p_b_TBu<%VVl zcePAt9@w;|aaz;(mI0wjp@)LAn(u0w)>zrlP=87NL-p@AEN{HHsiE=5#^)N_H(uXR zSvRqER?Q(Y)q(040v{^0c)aFt;F7>> z|7iaw{`vkP{*l!mR()ReKy^p|F#o;&ivs&=Ty;y;@bhi{{ndB*BmO%AdjtNOm+QW( zKUx1({Y~}l6jL2rcSoJ6{_2K@8jVeFHr>(uam$0j+k#VDY%TYPC2S$!YDpc&; z8QmFeiEFFhr4Q*74C4%5<09k5rX{8;%v;RQTAr{@w%uaC%KndSimkxb#oo?-f&G9z z$vMK6^e#CWy z>!|As_Z6NLZ>jfycZlawSBmo)=Vgk|k9JkMj<~B8AFw(b_F8anIm(tWFYK)+r8k$$D_!?>Q&v5}g{&vADuytqr>S*MRX z6nQCpcW6ZD<8Wo<(deE?QMi5h+0Z{K?$tfiDLg)06j~lUs&v1k#T(q-;%n*B{7TdP zP5DhPG(XY&Y16pIaKq@vBxRBF8yg#f^%vI9th=)AMD2{)qS{~TI@SMGcYE!*K;OV^ zf#m^xO=e9f@M9n=@Q?ose@Fj->aJC*D@RqmUv2QeRsChv(aQFfgDOsxAFNnjwW@kU z)#{43%9oT+tQcBptU9gy_=2j}D#ul#S~IW#GJZ{+!?PB%(lrr)ea 
z21XceG4@pP?KI0|>v7vd_P)w0=h!FM{q|uFi?f&ORrmeM`z}|Oz~(#t z11jozk@t0Pt+&K`g^Ibp=vnXC?&rVTRmT%49 zSe~}Mrr5#+6%8y^H9MtZf>zs1>*Llj%B#<|d|~pKel#cBy4p8dTTFWm8HRfeiN?20 zA6WjfK50oZ4%97IcHjox2XSq63k`Lq1*W$R2X*Uonflebb8+v*y|3QG**-cq@3zdfJ>z=CLP(QrxM4-5OMD^#@*H-&0Q!9QgJE{2m#iiX!|14cowy=Cr`7LFE z;$Mme7seM(DZI95k7ECA&-Fg{jq>z<{xbDDLv?Lo&p=Xh6N_X&4@PnzdzrO^xB z-+JEmX2vgzU+3$Y_)*d&$(JSjk~5MABzI2!TgB<_OFEMDP4Y`A52toZ8WdZ`KY6Z^8fC%e`H&0tF>=&baH%Z z8>T$`4z}i2R7$h&w$)khw)|>JFx_w7W!+-G*l~e3{X8NOD2&jNF2 zlg{|CBFz)k2wT3cF>Y*hV|YVoTWDr@XJn-sR~;L*L_DGD<_nr~8xJ;YZFs*SRR2-^ zb@k14{p&Jn3j;^g=*QjFB~@dpo~m3_zNmCi$+6-#B@dL`S#o(vu(}9hr1oQ_-OFxSI1@^e>v}g{6`Az zEikLsOBf42&R>4=)ssW>=M~I3)!}sU=~-vf&W$W7D)p261A=!=Ju4JwJVP`eo|Hqc5a3y}{TyIzJWbf_X!Je1hmF}ArOMKO9@-(|9s#yQ?j!NZwL~RZBUmSG~ zkKzKO92-?s{U-Z(d!@ab@&Uc}MB66oYqlBoD;;(AhwTFtKb&Q|#(K5ocZ=2Lu&3E8 zRm3VyMQn}sFRUL~wyQ{N599TQ)rNB8W2SG6BMgfa!f(=@ijEH#2OkQK4jO}doBK8; zH_mOiuwi1|vcSfweHFTj4dt84o5~BzSC>6ja$7!|v5P2JGmuYv|r~2SNu=A3A$Tcc|Bav3rm2?6mEje}CHC|KCkp+H9Y> z%ez1Q@FPc;9X)wu>EV|S^*H$Afj$R6J$&!6uTETja$v#i(_fyQQutxf{lzOwA1zO< zI9su>YG9zK_Jf8Onm-EJqTj{M)E(B}Yb-I{XR+9F979}2&!gU4-xrD2lyK_K^sa3V zW`CV?Drag=MNV_OuWkps>w7%h{poHax+Y$5xU;2m zNvENm1D&UKab3{nf<;}*JNrA$@7Sipgxsgvt;h+rJ&^rW)^%c&>0?t!#UqW47ZN$0=ojORbd_z2#!_BGX@{E{dD)F#m4; zM0sW(nA@2@R33S~(WM4`8jL+mRi>ypV40y9WT~*$?P@HFwHT3WLTkBoL%>CG(Iv%MQ9u?o~HWx zyXtPPy}hQ|->$lIrK9|ZlH#JCXMZ?Vbu#P3Pe=bg+~ZLH1E==R+kJLt#rDt^*QWZv z-&j+za{P+Wimj{rtiN>A+0DAme{B5a@71gQ%O)-vx9Gyf+y4wNduUbJ+726r{&VN1 zqD_l7ulslImeE_E*fwZK)h_qGCk`ebtv~Tnf%VM7v;M-H&lwcrzESyJprw9H%h!>o z^<^fLE#PQ$XZhMC%}H6A_HIVoHg9B2%0Av!m)oynO_xWy-*HhwuZ@@1_sQw^aKBgk zh5Bv0tjFb^%Zo1?e)*6qF6e)M|8rNEu2^$fTt8i(hc4ZE$s3mr?lYm^TbJK-#RHem z?EB~?S6}Gw(!Im)IUBPd%C@vk%<;7Qx!urq%iA7l(;>Z6a!=pK?jIc|RomWXn`ryP zHp14&I?%ksaJ6oKMnpEqGc!WW4)75nL%)GKL8;#&!U`1iaWJzuzXJMOUGWqaTHzGbAjr|Gz% zN%wkmL#S)Z^u|Bxx&?l!e7fxY;s*=coW3J}c;4jWLyvxPXx9FXd%oP6u`RIq+J9bN zzhq6#@|-^}U$}Q(mw6x0Ke_0JrPr^VwYuM$k5{i;Id$26ORie*?cCqy9GJ6rZr24P zmaJdiXKnR{z~-m7U9r=&``qr2_srb;{{G}c{g1ZITYqX-(W4Bx8z z!suDKOl|(Hv0MEOHN&d)m4=G@DkfI;sBROuzIJ$he#5_wotm7 zxUT8hmUdxF++O`?(=m(LKGX5Nv!@!be#pJs-O;n(xB7e|AQHO7wsIjJz;C~lKJ_b(i_r1`Hemk(c2w)D=$SI>WC?w@nF%=>)7 z*~O!lo7c7clfBikqyO#~_op6lpGeM+oO-|T{gQDNm;1-n4Q(D8S*m~5+|GWf`)%J( z$xp(1fe6 zyLQiD^Yx+YKfYnm4acv$^IG?y$FE#*Sw^2%tP1UK@{i?=QcC9>EIjK6WMqh7iG6wgA)8cmNE;sBoer7hR5$Dae zJo^Jqqx%QXu=vLkmL(lYnVt4(#_Tq6ZCi3i`VKof+||L?KEK^_?S609&~8xf zx!j`mUv$Xn@MG@MoKW_&S=%%FWL%nlBJJI@R}@YkOkCmfdN(*Pvo)KF^?A{j&=)PA zHoe(!d0j=|x9Wt-^`$$Cj-4K!Z$0smN=wV#*LF|WUC(bHvSsh4YyVld;laPBths&F zx)q+~2mYG8Pp>R_kl8R9ZDCy1;OuWrn@QZBBeI_0Nnev%2I= zZ2v*0>0N&7`f(5M#kXDR>3ig|!~KU`HDlo8gBD*s;o9cG?XMd#_`+-FU)^)yoBeO- zH|f%27azDN?ZUsickBL8_g&prbua8TylYeE7dlL7J3amG#IN1kZH=Zn{g$}TqoK%% zNO5Ra%dVzd8=LAcs!OOD<{w!7M%CELwiP|ftIFcbt>r7rzEV-uz2)Cm{dpn@(ixj8xQ zCbjF3J3jZ*+!u2X=5Ay?L4{Du8vI|26TA4{m9%UImfg6 zXVqlR$!MGYd}>+pr%6o-&GG9zb6t6k4Ew{@3iDRe3}Y9=CpvH3caiC#`esMti*=XP zc>L#7o!gWzFMGIbQR#Ojw-gr?wJR(?eOJMLe>&{$w6}A+E_1rfxL{n@?Ol6z>)Cxsj~9E~ z(0y>%^_`#S&^xCz^SQLz+VAMMNA5T@X25Q8W!`7|%yFG_p3C43#qajrny@&rD*1vmfBGkxS7x2cw&%>sF}M4!-4ul@H*|cg z)5cEK9S^i$larM-D!nTCmPBRSecOGnCw!Q=Ch7O&1Ie9|w6v>XH zhqnaZYk9MILep!FPc>|*pIg7C{?+g`@`-Zc23;hd+S%5OEx~ezHn{fs!7Wm|9bAPiObfkvaRp< zuX9(?fuf^JjxRoT%aKP9TKE04%eu2{XLL`SgC~xz%wJXXZ^e|F@`hhqZiu|7``oa{ zc$;~F^(u$SRpC13e#U#7FE1f2>F?x=)T`>B&1%>7)|?UTuFo~Jf1`a<`;8rTcl@o> z=1z4TZ|M+f=WhFRW>Q+ml zEbF{qhSW|g!l%1q3BFypuMVA|_xZPFH|#;4w#@<#Hkq*aNl z5?1>*#sBS{=6S_E#`TnQup??aZgpEJNnyo&bUR9(cuxH z4^$N9#+DUoWaj%OSJT~%!y9z<-_%}L;|Uz9{-)}g%6lrhm#-{MD0x{;F8a>dO{e!3 
zl%MRJcjfVON3K6qy5F^L?4C)x9^R3?E$`n4H$A(-y6)XoPcDCH>2*tUm!DphvwqaS z+jri)KXB;5BO4Asd$4qG+%EsNom=y_&EEOt-sy)LPejj*D(zUcE^t%5z4@xpYmwLD zROXB+*Ls)j4f_SoQdg44>fIPWGqF5ne)`c&YxdZjdvm+Dzoh-R_V;uc)6vqYXQ#_M z)pR`6VPEbOIdR!vXO2i;m70~>BlYprxv5EM&!t_N)-Cn%dax8 zFJ<;_Q_!Yg)~>7r*%R7c);1@*P1dM3qcg+l`_1@Ao1bM7ox2j|O(GGc!-6 z<&QjZ;ONxDLk=F<_syP!-B<1!yVIGzX_{K;=h?sK z*^DGdgtu#sv+zHqsC0 zZd$2Tx>wduaar+L&XQ-y9g0!PO9~#;`kR$^V93QyMUuj|1Fy=d*b$wbcy7r zc%}FcUM3=o_G8ULtKczzHg5qpm@|;AM@tYHGLi{Bc$$&gPPt4r6Lr9}AA%ENwMz%z zF}X9%o@ooW7)_%)*S4=}z1-~6)6 zDj?@BOW&I;PShrRPmD`mp8h#6tju5gv!<|qEWGR7I*Xy>m1?cEW3ID=UnDLlz|WA= z=vj6JPlX-Avm^rfJyog4V$Xq|A3U?Y9Nry1a=$PB(!kAuD*}T3gg%Eni&c3Fj$)z0 zrHEBFs*ZTP@jTbH45fp&qT|`eWZQe`nxII9=P?Dab=U-bg(m7vcWQ*Y%G{%f5{#4 zP9Ur{DgIW3$Y0B1-4Hh~sa;GG@5ib5SkX!>5!j38f{Xl{JPL0&Xs!#`yI4`^SHz!r zjXs+;o7$V=fF5}aaW(O8LZp-L_|nzG=4la_sz5hZba=P-Ys+rk-a5VIK@(!U(y+8{ zRn1;qZ;hhzefiu{NAdgO-NmUzl%msx9}0r={$x!__fP%!N08(l_bZAK`TkdC__pt{ zp##FBB0t4dBt1*JpBr0gwH^`*!p3@pgG8daU#q>UqU$pSQvX@m2et@;&SQ0#>n)e7STso+Atv{N@+% z4sh22IjG{SV>hB#fgHpB9}V7uIBGfND%p*+maxEy*|V*$%+Z|(f#8>S zNITxNXSH2#{m|Up__%Iq&12nM&DhGl>Tcz$%N~?&E)f^|7Tqg+UT`NbB)c->b=vWi z?@7zz|BmSvMf*MN=hrax_nSYOBch^O;*dY%GxlfGvbE_iQobb%lAgwYinYeoC11)I zmE#RBjH1d@dbipwb)Jot&5AZ*8>eZ71=?*hU`EaStocuv4!Gml67wL z6<1XjRhMeNM_=#rzL|cJ{@(%`1G4;Sevv+7d~Duly!UzU@$vDS=fB(MbZgN{MrgbGftKnc&(^FarIUMzj%&p&lGVq!H^0!-x|}8uBG7g?@%H2kFIn z!9K>px$n8%cq@5Rc)fXxc_qB3{0D-s!cvTecN340cuV7@JU5|Rxpa#(P2whbEIudB z5ciebm9$ERO6N-j(oK@9;xBl%C?87_Y6PPNU-`AXI0rky&;M%3k2zqzg zLrOVGL+EhsftqGe7sY0_jI!J{ZvdV8VB7tc-%YC<@73XkJ^B^Z4BZgTnaaf#8`VMO zXG)Wb@uF!3&5g9%sJRzbj+Lrh!b$Lc-rX{Oq z*5I^RiA^zUVvfb-B`}inQ}1SvEi{(&DtA^yX^|SO!CCvh9x(2~~H`p58Q{O+F;wU|*|`(R;i1D4$l} z>;CG1=|PTe>>iK0+k*NAe(_K66?pIUpu4|O%~D-c5|y}eGSv1{<@?B-)SiV+@D!0@Gq(28hWdKfL2T0ude?)gNVN|@&Q4Su6N(D!&t=tGi|1ITYl z24WK-&*ckb!y4jmavAjky@_F9BFF;f4hETl0WJ875ys3%FLJ7&rtT)x2*+Sq!W)7e z{F!`T!6;#G%tJH~cZZkcN4!%MCt~9Z@GbZ)+zfN|6`CZ+kd_(E=Vsg=o0yO`x+@0BmOy7)>)PaAVB?{x0#}J}CBOga~ z#=c4n`7E zjb78BP19G@Ej0dYUeUJMRBvNDGhI63IBG6~f-<1rKA5|dZxl?$Cg4ITPkv6h%)P`z z>{af?^(K3d^XB@D@{RO66|gOc(*1Y$!`<(6TOQONu-w1UH`Hg4&orOAKFK~QKCgUc z`aJhO>eb~j-u<}pojk%VUQ&Y(5hV#9z*!Z{AI}%_Kk#f^CO4ZsjI|V5!8k*+K}Q}X zXOd{-gX9`=5M?r@4<(XZM2aUGp-%k;M$Xy90^+}I5^zO${eo>B5chdf* zPh|9C%8(d@2Of5%ne?ALHG@W3Eus|rrG{RsY)j|Zr z1@HM2`LlpmyU8ENck#M-dfp`75bg|4A$te=4r?S@&HTf7L+?R5N$CNujYGuVgm`C) zqq~CvC#t*I-f_BJ(|W#{W*iCK+GTYyhR=Gn?u>Sz=2m5#npWNu-V+B4KIQ$FdnRXT z*1YuHDdUn4CZ@+b;szyrOfF6<&uq*(p2f(VnAR_u62Cfz64N(U75^h~aLV9}@7V+M zBn9P#CyKw9tS=>%8Ons}Ff zUhpOR)%%eHG6Grxt_27KZuv*~&G0?!9qAe9u|>65F-_JYJtO@i9pHA^?Tg!Sw`l25 z368%Lj^iU-4*LMQ7eRruUV!|E0u*lp{Q#pI6GK#}AFyE= z>*=WGIRP#GxOuQ@+}AzHy{0y?;qwS<)hjK2usGjTX8hSN4v z6DVUT$z*@>V$v#N0O6Q(o?Q>`ij2-79Z_uuTHm+$v@C4yG!ATBU%#j}PamYSSM914 zSKKQfRkomXWy!9hiTP34NalldB;6x@QbuIf_S_eFPjVM!Q!^9O156=O&X= zrluyPsWbk{4$OU&mz$3j&Mq2T{IfWtxTg4UNn`0>>YU2a+P&3=nw+|CjcrZwEp2V@ zJG)!7)`4J}AMe^r+(iDDI-em#dF)A?Sx_ku1yhA@vFmt{lp?>N{MY?&&m6CF-W$AY zz3#&&$_w*O@ecPn6A$n8Kse2N;Z;xC?{cM ze^3w7hJ$Bj7bAj^4s7XvjI)e&j3taCj5LNPa|bx~dI3T55@;S7Vqx--zYt%f0shY6 z%y*1L`diuqYBHPzndB4TVhtlXz~fl#TI#%GkAVKjbMp#Qzs~L*quNKeIa-c3Q=8s5 z_HCF~*Uzv-@6cV+uB}Q_-!AJa=~G-)K+SK;TU($lJW-@6dRQD^{IzgVZf3@iw5O@F zQXi)@r~I3~EjujtS}rNql^dPkr(k4$W8Q{>wMCrb3q{Y0)|51t)u{(pR#&Zs0f+ep zS>4O}gN+SFbJMMsKJDe5kIin@n_XJxC1M3Bi#!MVWL;1#R5Jfz9pF~-bA+C_MnaYS z0H4b@S+qP+!BuusDpjfO-8`#6aZB>9^_lD&sVr8s$iK;!xm}j7lWu@&Vx{DY_yImzbW)hcKMSmUHOHHi z!$vvd|KFv^3PT$a7V?`hg+7zkNiC#4hIL#+y-9sR4F)DYjIx$u0$*?d*&7_LO{5p_ zagtV&FOpxAt>itF6iO;MU7u3IC=ry0ltYw@lq`x5_+7K8187%hFM+Nn)2>tdQ#n*0 
z>Qd@q>H*-dM^ZB=9b`LcGO3cdf%pqjcz(N1y9BPW&ZUkE_P1Tz!Tt2i{Lz%yxvAq* zTesE`Eq9y0HT`S+(%95ssNYzZY*<)htPauzYL-5wvB`DeAb zDqc&|->-RLI99u{ZcY8chJwbYO_43hZNVMMowLo0tZ%wX9jUHY#3N(|)j>T$Q__bp zGMM+#tLzf)Ezv+E+%}<=&@#5U9_SNx(^}(yjn5ha>JQW+26oLL{lIFau9tR+MpiWlI=Cmw`jm&N z+bf<``c~bj@~awAvA&E6O1Gh?u83L;Fk9(xbwEth!Y>vNEaiTh%qq zBki>6rkw+~It#a4j}clr0*7@5P_sHTWsUrfpX)e{xjo$HhS8v)xD|{wRKesDpyrBtH~8L6+fz4wc*uQ_3!nU_1pC$^@!e2U#u^$iG_W3 zy3SF5r17qCSJRbdbxT&OqHSDTW1CmU{?6|vuH}PuX4ftIcE=RptZNBLGn3878ra3&cg@2JjwT5?6z(v=z$- zr;(mNhMxMFpQR;H z#h^JagBx!?WgF!_j{H`9>u9P&Klui6Vdx-364m+pWV>)p{v#wXSG^%787{zKbc!iXTUY!(@E{{X#cmZH~71i zt>0UQx9n}c-Q?Zm*7U^Kr}1R{={jZIs@iejDzg|S)NQO=P#b8NRU^`ibTc$U&1~&W zU4m}6&Y&Bv?_blR_tF!=Vbnu!*NJrxbp$=5=0?p0!<0I0{h|i6!J~l=s$6^HKaC$6 zEyf4U;Vlna#qGYGe@yetGt9@#WXmyVTn@20x?&w{pyF%J`%j4QzUaFD<#_{QzUh; z9tXuQ#L41V@q2K4=80Rt_gN;6hClZfpT^UHAqm6QW8JWJ;RoRY2(kVuSSe@(pZ+~w z6L$eOi8GiJ&)y8Icp-QPIIK_T6f_qZgM@*TB^PM$9t;^HfZ+vH_<6b#stQlqJz#+y zlqO0or2=lacTlUWr!0gQ1q+Da6_AQyCB=g`hD!R6xE53&G0{fIgIt9PaF+pB0h)ok zv#;a5eX+fzYjYRcrM5k`No+f8srVv>+9Kn2u< z^bz`7H6IL1Yx4|>nq}2Fy8o&x^*3smhUv8r>k}LA85bJ&86%tKwY0VDZQ-^2Y>~J1 zZ%=9e(D4Y~25Q@qt`S{_x~ABtgN8lJwSjPecn{7=88wrN!VPYqM>B_`ov1HsIBOm2 z6S$xka=voQdH2B8lPWkUj1aC68U#+kTj5Ob=KaKGiDuwp(5W8d6g*7yU6dvnUxOVKZT!*n(g_lh5Lh;!on+c!zmhUL-e|OX42o)Pg&A z2P+iqjy54Wq!W>&v(SxbPxJv&!3<)aVbs$1(f_7DrR(S|kRK39e+P~q3jH?iZ`yWn z`z)n7sJ9_8!2+wdiL!)p7CfA#lt&a1WjnbZ?zM5GOyUq?4&gT;4bDuQu-VlBi7thX zDEry2&$gL1hHaPiqs7NU2QSwN^GVZ+&ZXeg{ne^&p40Tuc**$6_`&$Y_{BKb=-U`q zzrC(^t*+*$ejs@DNHrS0qZ+CHqZ?M8U5)FNdS~^n>d88XW}Bu7ao*MHPM z)o;+B)z7a9Hw>%WQ{S)QNkd!1jK-@$F2ZhB!3w4AmCS(n-3 zx^nFM97CL!T~ml%q*;`|sncjq+9J3y(&>j7N0~2>Y?RIp}KS~wj`!B&V0ML$Ip(G09F_8;aAYmp?{g?mCjd;>mGG#}H0Yd%-#iM_yTv8z~) zaG`Lg@I8=Q&jc2JEdLvS4*xE%ihF=NjjM#Li*uZ1z~v6+@Hm<5Eo=j88Y>CiiuOev zND|cTcad|*I$$vU5icMwEzFeF(wsiZNu;ZgmBdTG7AuDvd~PE;2G5lKs{5gk>#NHbjXQ?o#O zMmtPv)>t&zng)2&jjzBezEm8m98u+^nW5dQ>t4O2dP?Sn9`lvn=v@S#K zwEDnCNmF>Usim@YZQD-pSS@br-X7iY$#mbc#OCZe=_qoxy0nB}#1o{k@QO_WA4xLx zA6gZ?ktt!NvXemPUCy1y_2;&8nt+QB<~8&8L-)K4`+?oYqA-rgTU3KRz%FB1q)*YKMKTZL7iAwB2e zyyx6($R@F}GFi2(9M%=qNmc}OhNflv`K%xLr zx(W$_bJHEg(IjL)?83dsD#Xkj&0N7)MxR8RN&Sy9g<^r!r}Ly2kW^3tb;mb*r|rJ= zf@Px_GZl6;w-0Uq8|rwxMcZ`CI1=8@uj}X3C)P#Qjy0^)4_p5el3N%9P0_`d7e(iGY4ednT_G+8H z(IBsvG@dY?YP#I~q2+9=xpj5h#P**Z$4y5quWcdrY0eYiL@pp^k~Wbk6hCS_&4clt z@tMJ5PDGsOd3GgdGjA{?gyaZ^3*UfSWV>LQV7(wxFiuz_{DH-aLh#<=$#Aae@$vXB zQ5HChM`3@2Lxna-577(e3m$>GcYq(tKgs9v(|K>feN@1?%SPBPRsyR(s|HoEX0fv1 zyXRz9GZlymDQ3Q6rZamZ{~#L>5^|EMWa=5Y3=IQ={D3N;$`cR{IupH#zC%}_dB{Q7 zO(kdp6 zF%2q_SG4ogb(A`C0(eovK*2uhqPg6hC+xYlf35MBX!9Bq59&W_+rHKp&4MOpBdPIc z{X%dTj4`~_`|HB0?o=wPR%!-mH*0rlr)tM)IxBq2xuuGdH^tK8PerDp>f*wZ3#I4F z=BR@!pH%Iu`dArQnO$L4tASz9sL0U_uf7A-x}$bc!->W@z?BvmtDAp7-b;MTfc9zN zAAVt(YWvsmh_IA+p0JTnLA*>BK!r4z>ZJcd8bOl-MV@<+KTi-Ws2AK3E)fnC90$km zP*|N_U=rxSZ$rmpr(~aGr_|qVs1y+o75&DdvF_M7VUVytI6!`gxT2xhPvI!xGJ!i^ z0+m52xO{xM-8oy?GPW9Ol747k$4`)ndM9~e9q}2dw zh7w*R|{qv=241rbNDfs7$3g8_WOHbxm}i1U~`nJ<_f%vDGhqCf|rzNj63_Xi0? 
zSu7R1gx$t|#-0N%6EQo9wG7tLgg!;pNE>7dY+-(4rom5zfk{JRnbp86ET?s+c7YnT ziP(qG&vn;1!zpooaG>A~JKwdy_R{jw^l!(bwi_+~HH|ZdG~REV-}s~og3_IL%tkhpH=;6DyBbWoUQnN7b%vXlR<g^tYmx8Zoj(kTOTply&p^H5AKF5Za?fXGOK5tGE+SgRBh{ zUxDlK+2UexnuG(HEnL|!h*;5Bv=yDaL0F63bBrNGKlauK!{ zI4%s#4Uix(06l=_Lq8!5Ee97C#$3&~PX75S)6tUXN5OBrm9dFY!PpMF@&fZUa|SXLHK3zd5v=R%MUWCTkkiJl zV!vg3v!h|Rc|aNujXeN3=i!_+oZB23c#`{Y5zc2IzK$YB2A3WUfB!q`R_b@kILcv2 zb1{%Nljy`|*JbA!`yAVC%W!jj=c$g7?UJ^%mg&v8#*K~o`aX514D_1N>chH=T81`R zGhH*N>U71t@~aiGL}%d|<`Xs>{0UnuB%G^(FN&_2PzDaPBo3_qJ5G2be>wQoEmP zKS@QU)5Xk3=vVe*j*U|VU76d!;#3L+qCNO#v8Uv3=_MIYNpbh~$n;na+`>*zt;c713&u^NUxN6MK$88MK5lScnY|3;fd^#>1qJ#jl>r)!^U3}GmdPs$@*f_g_tc}y-L zNlEXCkBNyy8tEMAC^?1FMJ)h7)mp}KrUX(sP9u8cK01$8#oEXo!WqRyzy}=4o5yz- zyn;^5Fku>WVshcs7%#jp)C)V{zfNMs*ikH4*h4UvpT`>q#Mc<^WUdEy5GRdw0lkh) zW$vIqr5>k5ledtD5%dn3{go}vGRYLu{;E~nvcKtIm$7`uoXVi1cdX_X4 z{>)Fy73VC@8k+GW^>?x~(K}urD~Wv-lM`c!B_)hW;{55IqDc{_E>GE-T$qp%r;Pg& zhb2r*x}Q>-;hy`r;CgXQnY?OM^&tbL?sMJn`p@;n@LpbI9NW6UbhT?QK~K5N=!JR$ z)wPDN7WTszNP4>6m4(O=WjA+|=X0OSeu@5Z0UrXrgW3Wg1=a?Z2R-g~sasdjuOPJB zg>IH^Te}x@|F`?VZXp5ZeTR7c*x|eR@XKcO8iX{0Eiw>{YraH-^%a<$IVrA0ed=k7{6Vx7~3P731>kCo{dw)x5XnR z6_U?Vv)f_$C&d)ybEQCaM72j%rt||PIzcf)VVCW9TOhqIc@FQ3WujDUEY?l9pMQiq zkNpGrKz~jllA>JK9l>2QtWxvsj)>NGO}iTSwXdtUXwFpRmklY2E!>$uB)2{5Xhvab zZE{jVc&s40`gcOai13ZyU0(}B7+<@8Q-1Fi-s^W#^x9Zs%$MktsLsgOzjyrl`KvCn zCFViAEa`npL1t7wUV5=2S?g^u*7r5`Xp%J7wVZ9AWg29SwtEv9v}DAcJ4850+~iiG zc&u9Mu5-`vSnajI=ae5W5bbX2*|YbaKDs_<`poP7pjS!T)@oDqNe?COKKl&l#^UKh%@Z^XuzYM>hMwz0Y#k`6oCk#vuO{q%NrHM1= z<^0LdD6Xhr7$nUN9g&u~b{63zsT;VjZ_rkvJl;^DK~yF6Qho4h^m*hP>X#6(Gw4c? zCU8#B$?mLPv;Ug;*TBE7|F!?Grk?A%PYYTYm=JKr|Fv(q_bjgk9>^KX)v1)OP*0UAA4YMO&X*c3X~G zpWC=VyuPpxb5dMR=P{QTF%@z!Fme((jQSfg4{kFtbU!cMgU zt%_C&RVNj-a2|h=UKXcdX9eB(tz0FC%Q^+z0E_XHswYh(G&@Fg+09uUYuXO9jB2{q z@K@bR!#;h8c0#4OtfY8nK}62{%<#06DN~atCsxE)#D&N9ia8zC{(ICf#?PK%%x}n- zTOS!84DSzq-13DNIxsva@@ZVJhVPRM^-4ABzniEaIdl#vTgu>Q$-)m$YPvftfL#K{VCH)yMZ;{ z?jYD{UAeX|w)a+d=n2^@-Mbp?Q=Hy}ainGBc+y(pUBWYhfzS)?9T|m4*D-@wo$Tw} zM&4I`6?|xdaf0*UGm~N4L^tv6;suh8(j9K&WD+^1K$LOH@v0xH2Gt9dNcCHpq^yLm z&&q1$098-bOJ$X!yJCxczRW4D1RXL+;t_+n>P7%@%FNrU}3Dl{^u5Idq&& z=tX22vym>NS;*x?qs!B|$sTJ9vi>&THQni40QKINmae8v#+40wYl-?dn%foq%Y2Gz zbKhn~W(-RomFoY8m0Xv!Eh#IZEbeCP{Fu*?7k^&(ZvOgD$l5QQKfv=2x_cGekfW zT^5gayCMInuqw7IAF0yZX&xiphpP4}?<jR78dh7$6k2rMcmf*Z@ffo&4^mKI6HZgRIJ(kQVHz7eX!GSL?NAF)L;%Z)2r zEz6SakQd5d%6anZGC$dRw{gIi4v{>S)Jurc$v_u0N;gTvC5t2;lFQ;pc#3F@C=Qch zZ-iCC_t;TUlZb@Zhz^PNVCez}?<)5OCycFuv^O@Z5FLpwLUu53Fm})@s4g;}G=Om3 z+1t_3wZ`_u(qx*@No`j*|JS&#zM;0W#-h_zy{l*{KU=!2=ym>(yd}BUvY%xXrHWGy zBp*GwWC_}7V_y6>V8^wX(ND?>hp?g^(yjZQq0b~JN(7AGe>Kcc9k=y;KL>8Hvk zy4SklI+7u;9_p3{~@m>41(9iZ9BnMYguo7ZVk7z znIBlhwq~2fwyVq6G0b(2FoCEeRZ}gD7sy{|3u=Y|CGnu)Y-KIr7V-B9kBF{I4#P?J7VZT#u~>LPm@cS<6o;3*i99OL z9};$x_;Unn1ycpx1>gBsctxB#mKJ%)5YxVpV~Fzz6I>^ps58Rh;V9|SSWlVrI&QXp zGTyJF)G%}ttDdU2msOV>Dn4CE%s-!VDl06bG>w?L_)lTdz{K(KVKGx82mYM+eZyD# z7bGO%>$x!R-*4j_f9_sark23_R=N6(z%i%d_8^~A1-R+vpE9vKI0U14k>{!g4m0+ z2AGvaKs)p%pZwoz_=_vX@duRu`8I;p%`7rqHQh3!mcf>()~8)#oudgv(oISSy%1T( zn#0cF+~&RFj}>TyX58JaMs`F_Q`}VyP@I+BklvHLl^7*t={4y%S*Akf{>&rI)8=*0 zJKgKE$9NU4_~dq1JOVqyKg!j!AH&P^I_nub9C8@X@@DZf`KA0@{Ly>~U(O%S-wEpN z5P?Q;LFf?j;8jf%0*=R@z{}?5flfT1|42|E{E5wmbcU(;Hk=C`=jT8|eG%mI?s1N= z{)01mHJol#(nG>P*ILKEE(lo$N7nn!-tDz5znTIX8Fe4^i?yW6?`7YMS%sVOzGu(Q zJeh7y4N6fYFHV#uXyVwhPosQ(r~kx$$CO0w82paIWw!VC(P;*d{@UP$2#$=@hHQ zdg&Qih4hGs&A-ELW6q++6aF}kK!18p*Uqko_I$@@ho}9hE!e8C1ez)!>*kXAo8_&g z&Z4l@+s4|~S_jy|?ZaGF!grDzbq~FQ5y{++KHwY>u<_~Aa(S40m{*1ON$*szATM9f zkso!jftPaphD)*Hci_n4(xs$WgGA^ZMrp)`3kklu!Bfc%}M(p|MHIXU57DsTy 
z(?gp>^kAHrA4(6O9BGSpPc!E9FK$vtX#3P;)?R7gHg~igH+{1Xbj%@+r^*?v%o`{Z zlDVqjUpR@q3v#p_?9-gny#9g#f|GnBe}`}uc&%818r~0XUpAY0kaCwmagZdx?vceBpD!&RHb>Y^@;L(6figNaKJIY!9K&hZh8pZ+6(B*hY&j$*ILD@#SO6YR@u#GZzw4ZHx(s;R+rC*{oR*qF)FAXfNfLsXQ?1>p| zsic%{$-5G(1q6SgyS$XE9-qR+d&{0LP?tciV>?2}0-*jScT z)uyj+nALW{?ALY3d4Qy+;`Cp%4fJWqH?}|bUr0-I<6h)l<#ll$v+Kb{rv~lg6!#Zz zE1YmSf(A%n`^CZ0GqjzgO|DS;J?k0M-Hw{}W$nY-`nJw%-Q6~~ePR2Uc4o)LPKU`C zk^%|#ROcAtRPqJt9EJxvncc}L<+TfzVPCL9%r0_?KTA)$14FU}d|UDa(}D@b99#*iKlJS~#hu@CHEg@;!DOD;JW{KOiTN z-JoJ0K>uZ}frOVmtT&MBeG4fCef%PFoOP6A=8lG3flFAVXb%1hA1J;cUN3IPpX0Cb z1pE`e40`x?L9w?8O9T~s3ZKI>aK5u|u$G_@OWrxIOo9mL|O_{s z|A-A;_O(1j|83WgrN7Ffn-jZdJj!b>3MyM&8K@r*cJ}bj-Ce(ltLd9iE&C~V8NVk~ zEQ>`mL{{jUW(gMxR`70scP)%ni)xS{pxibh3Ctiy4J`wDVNWT)2nQia%F8~&#xQs9 ztO37@wew|LWAm%#6D_iq#Z95cqUK%gEYo8166;XMJ7NiiORr@nvlsI32z!erioZ!` z$iif1Szkqv>VZ4g>m`Ol^+vO>zhF!t>ibfz5WG&=qRWN!ppE8?44Zj4*D=Jh8J-=`?gq6e2x{-n{6n;B;$>BRLTq zK!VsQ(T^gbK^rmr*Qno1e!q<9`jPv6dg!aK{X$woxM8b*r^G$|6Q0R0SX7#%zE$

Ynmm+{K(|&9AUa;(svR&;#;|ZCwyw6Ha~1R)=n}{wYfV= zT(?MON-?d1VL>H4z%s?dr7BsO+^7gtj#4sIcioqJR(kP$+5sA+I<0JDbGniOgfXp-rNSAq#H zpmndPDfv_om7~avP0de!nfOww-62W}RkDvOF^9nbf92)0fV0 zIL*Gc4r^TnNtt`wc9;g(lI(q5F(d;`fSAxGwt!!P6^O4%i`~-8b#0T-~@~ns zA8q?$R(E8zermehu&UNqZ_&6a6y=uUwFMt@W@qkCEB!M$$uGe(erWuG_`3P>P+%v;71=NNW zo_9S)yB|^>k}q=`A(??kVfTgKf#g#1xAKy>b`F`tX3vLgqF~g8#39khPw44&L)<`> z*1~Nwh!w!bIA1xnoP5qL$hCdP?gP1_{UC2r!npzNo(tTM+*RBZNTj^XZeh)1Y0x*& zOP>xL=1^n>a}Xn%)`x!9L5n(zLXFZ_6Fyk@`SGgRZ3tspu#zDpC|wL)dZBP2$z>PXAI*gBN+x<3oeqw@$Ba3!2Cr#9AggsN z^*E%|Yk@;MK~aHfpGw?7Sn7HUy{T)C(T)u;m4oXHaaiq3?CLK6u91*YHvy6!IgSg? zwS+9vY;XV@8J`g=Yd7~2f3UCvGvj#@oAeU649CeADjbS+KrHum7ke!APZUJ-~ z5FnE`v*TG_tUoYg;U`*&_GiViX0b!rIqVPY1&{-_mBjgV_SM!T5W20%ECXLNq-VKiERnT62B&0 z`9n`j&RCnBnLDL`U))q0rB1AxTI~ka@$shM);k?n%+IWKw!wDX$s&v+z9qVdbtIhf z8TjaxlyJ&GYF}z1P=Q*QRP&QEn9`eafpVGB3M}quAmytdk(x-JMwSBo@|paI#3wOG z$wVq~2O*tcAeNB4$YaP(@^aAT&(H@mb~1y|EEbL9#l68BBzP_SfL#$?#BYiZf|txH zjdI%{E0sy*qvVUA4jus+`^V*#a(6{fMWDi4K~lT`r}0IZQkL!Z#O*xL%bVT&-NL2* z(&Lgiu?bQf7K*Mwec~ZJ1!?OGA)C23^juini;%^f2^D{Tb{p#h^vJd7c2o@g&poTdxt?`cD5zo{djix)^4L-r;yi8O-A8Ra-=U(_|z7HCa3 zPc`Lq%x?eLN^6R;RW7Tzs(w>`rtCmza!FuGkK(+-GXq=L3*JJxa$6H4@Mj~&iO~O|d&0j7g|8s`68cLzk+B=5#Owuk^(MrDJV5UMC49!X`n4V23~=h~aPNd-Jm) zHJb*jG>DtS`J2-T@1^l<3cL`{vL>=}pxr|bF5+;spD*aZ9lx-?2FI!*URenm%thigz12}G4<4 zs%e^NO;7DN?Izv4>bbz}$PLAYL$&O>OLc_$9rf+?4;%J29yVTYdeZ#5CA2lYP19b} zp@u{&o7vYo);6>Yu}9n2J6JHWZ6?g)dqvnl98BVZx8xpW9#utaqeapWGKK(gN<=Hr zJFI~)JLop#PJiVpd5?L+_ny@u2E zC+lw4el^6_gz7I>&(@iN&Q@2<0hbx4Y<($G+E$_}{ioDey1XpCY+(7*a)NrLI$yoE z!c}p!vIsbly;YW~r<%XDBXonSC+nxeYJ4ytwexG=*Q$W7e_Nl~;9)%7)Z9F`^}~!pI_b za6#`9J%N?&6fwc!-YKd9(*L<=KK%bM>=-r|bQKZC!^H3*LDJ|8pog0U8wCaYA;1pn zA$@!;CxlI4k6~>`FCf?8Z8DqArzKK$klCcigzl~s$0j?e>kef4kjq(-s05FRSRKzEmzP&n){^mQ>aX(^)*rN0c8fPc5gY z73u)>K=l~)X!Qj3R&}y^az$%JOy!TNADUa*-nu6`O7($is{W0BTg`4mM6IfRcf*s$ zFk@j8sikM@Y)BbP=}?;Ho0nV0K`Ni9tE(#;@=4D*54x5S+=!vXA*2XW5ArMWPzsf5 zq1tIg1|4!V3y_1T1>M3jvi7nq?5!LPXE8UQJBIg~hX7aD%3lRE6$MsdfpDd84vr>u3?wgW$a^Bc3Jna+QLAXi!&`^@v4oPUzgxA#G0p zH^bcKo=qfUcEf-5x9cv~9xzO)A?aV~`fA@)Q7Tufo6Ay5T_xvBmX-vUyf2}bt}Fds z+EOYl8(y}u>_S;M_$twI_wrul^U7bAd#N+j&ng~Qey@tv+}94!ebx1GV^J+VV#+60>X9c!?ycbB;<&i(*a-WVL8zY=8H^* zseR)p?UXEPDlLo;p8JAplglgnAiE#uDPrSeAellU{?XMKe5nb0mA zkL|z?VTb=mhZ^AY8YkK(`YNh}7q$o98=r-r!lUqdn6p6>GsGOR1SaJS7q1gP14dB} zE{EZgK*-!J6~7km5zmJeST8;f6MC+QcZdgybuh0-jE8_S(Tv^2CSVTXci~2%n=lnJ z8+!}X-~zP3>6HZv|6+eWo-?6}?;VM;T~JIqB06AeX|vTTrXEailAHgIv?M_y02czx;OhZ@r;g!*80l2Z?(G(y=wH;M|A}4nW|xx zeJYlyzm?A_pHhCcTvskqd%!0^-A(PK7OUOVp73QUFDlO|Zz}gupH&A`@G2!$LdYP# ztL+E8^pxs?>J$2PHUAj?tIeuwsJAqj8f%T}CVexrbzIx|_S6n%CvK)$@+~*4i*0>i z3>4K-??`Z-cMT+z5>68*ki5xYouVqhv&Cc7Gu|;5AXccaMzX3|C*Tfw$r%Wz)eat$ z|BOFMP%pS76agQUgNb39%vsSlQKbln`}+x;JtQ$-EC#R0a7e2;4Ru$oxVvPFhA9D?(XjH z#oav++ufP}3E$uQKKlehLN>cIbM4H%_hh?AphA4QTcm3n*Yk)kJ2|g+dV{>`K!?Wm zo$dPT`e~bLJfuY7HnP}TaRvU>UU`ENOmYYw-VE&r@ha;{RS#-YRYa#ELmr(*>t@gI6prfBSS)$Hnn-67ZE z2=eWIgUunwd4)?bZ21H?Gxq($kiWGJ*7%B7Mc9Kc-UPl=2>hcVK3jZ#`IOf8(9eXv zxu*XOiE#A|^Bsj;iMPIyzCZEvq3k&tN-#Xus;- z+Fi7t=HLMwqP>&i^uT$Bi@WOq*9va8!OfKG{vSxocY4-G_FPZk;kTfxjNT#G$vo)u z9bIJxBHnlg&GQ|*Xo^0_w>xUCR{QSqJ>+{B^^2>ZohJJJ=X=2S0cPU9?{VLyzFmFo z5g+cv93InO$2{(VC3n>yz$~AEzm?~;)vK0QjORwrV9$#lRXpyx4@PgV6K-K{H(Y~J z*K-FI_{6EE<4|N!AGW)z`=b4WxbTQLOXvzsrQtRX#J;k(^{084X}Ga|aoHko7+N?@uXH4dDI zaTytzrYud4TW&yJ{rtfNn+x9=XmNR?9%0Wl^HNJc=qf+OTMcBP+?V_%i)nccG#2``ZT?RZ;)>t z-|D`e*aMBl|J~FdKsT8P{ZxHReUZ-|cwuIEyY*0QH4c96BhQ|wM_Axd2C+yl_ZU<( z{zja*#^t56!Ksu}W5+?rEm~){12t6pH2b7==t&YlW58ZGTJ=+&TE{_G{Y9PVgrc^F zdW8)OS|JN|NbUsW!4Ax7i%eL*^t9AtDZ`WNCsj=JNB~7vY>n7GuocnpoK{CKjb0VK 
zBKp7R>Cw}oCr0;=ZW-Mux+|V%e(eJ&?C*F8V1pg)*_Ulc_ZzcU^;Ej3TKOhybpAG{ko*akk1RHrHQ zfY472mSBRlUOKDp11$L<2Y1ISj;&FBanyN;i^g>~>JJaP`MPg&_x3=wpr;dRp=x@c z^j5uV`ZUJ44fEOI^VX*ryg?gj37X-% z7?s+)e7F0q@SO|^3PVnfMW2aE(m#-`WALp)_0h2A9@x2U@0||Mtqvq)k*D7CCYUeG z?wio_WWSrQ+e%ba*|~%}k9InM3Pr)8I(oz`)a?QH?^bD&*gzokE#JsGsT$13D)0&U znUag|6df{bFWg$NDSt)Y+}sg49kWBT+>pugByD@@#FTc)!AZ`EmiU4=Yi!xrF)=@* zCn4r2ii(N)6BUJ@kx^fxUPaxCIu^AqYIf9A@PXco(ns%#u7M(e6S0fpdO~NtN{C2o zo#dGunS2=0N6)l2>1{FwWJYA2%Knl=@*3waDY#zv!SDmod9KlH$~J$uys&x4cbe!N2tRj{s~H&!rNFve$K!!V2hUi~ zIf&sRfR%Lh*@s-yJRcWC4UP2e^h2<|pXhU8H)>SQ1H+5}gyu$M=Xk zA6ql_XUyi9zA@Ee9Ac8AuR%t7L^p}9125MFPcPVF5kEPv8|CN!F8JqPjI}&peoO=d^#NJHIlcnw-s>V)YZN+7??ZQ) z$$p`JCf|>~mtaXY;Mk7&SqZ5LL9O+B{ZaiqbWZZo$NC(C*Q|J7LVa5(G9aFLtp>s+ zd0p~s?fJuFG~{EQyEo*coZAW4(ypsrlAW75?{!K8qsL&x>FS&^9^;PW0y+nc&C zrms~ zEN*X{DQ;l=qxkX(n-Uxo_a|0MdYIHV**PUOKwCCw>GCpKJ&$^O*GUs^izPv^G z0}Hwq_5=Ic#NsK~^Vn^^4DQ7{@?8b_LhKEHM()$w!gcYYbVhR;+1DrS4%_c=Sd18? zmNRwv=5o|^vRfT@v-=&7DV~0)xb5Km!Mml;4WCk|kp8MK2U~sFH^tY}ua)0YWX!pM zadWc&8vlL%hy8c>FGAPNj{f!itNEAr_wYCS{quY1ch+wYI;wWZycNLv+KXIA>U&i` zUSC%K1v6b2v+@eQbaU@~#Dlfq@Ami1gay%fY;!N~ej6Rmeu1IsflCFK4dCwT=5)fb z(4i4BTrPtN*`oE+*46Zp=8F4;`=~IB;Tfn0B&ww7WLNY-u4V3E8e?1r*0D>5M}?6E z+Jf5oee!1H?#g+Z9h;SsnUrBpubDnK?NO?fIy~iX^62EEq-9ASNtY9QC(^|0sCe;9 z_z=H5zGHkqyd~~?+~c@UaXhY3{IvMJ@h{>n@eL9dCfrWQNGP8;36(T05gRs7{++xy zrA}%=>dmx>^tKre;7Z(bqXBO-D2eGEGiqR#^=JQ8&{H@pXU8zCt`Fg@#fK0&VQ z5BKrt(zC=(bzK7Hv;{7i&OM#)I(a*dbG!yFrmlz!&8V~83l_u>%>?O?_+Bv5va}7E z#`mM+fUJZn6Xi43WJ`c$2zYBFjqd2nIUBQ+RTxy*zhG_t%RJY-F}W{tD(CFZ)@A>f zm7duz^HWBrjIZef(lgRlrG)|yzK}W&JVdd;sg|Y;NokOxM=f-2a#Hg5JBV|>}(-e~0GIf1wWNO{C!)eaxi_>$`hh;=&4A0EWoSP+N@6E24^9LOw z-12VcO~@}-@C)ozoxpzIa^YB1G~l?b@TK+gpd#V5`)k^XUW^XV)vPUD3^JhP&*(90=C}pjkH7(lUtcUZ9KH z5R7rLPmsP1?9D9wYWTW`^{4b_@i>f%hjq|=y@0A%fXyHCnF@Z|BJaDfR!zLkUe~;) zLQYJ;O{Rc<`m4u6j{wwz_H{S7?Zp1pXV+1#MwboXEIR32)%mhhnA0`Xx1Vt+>#)t9 z+E2Cn3)YO|;3=GgzI9E}70x0IfsOH_b4Vm_!>=(tTcjp~m+_8VT3%+&u=GV8UMcUh;iR%DIJYM&LJRVhoK<(lP$!#+#QB3T-A(XE(OFROdjjI2Xh-?ALD zJ7jOl{+aEcGa~0w4$U13HcG#|IeCBbTIS!(uUv4bz`yWdVI{+PL(QVQMeT~C5K)Sz zU5H{{m`7NM^@z0@81j}Y71cLjh}ZLHd?InDx99*t77mDwq$nvuQ(AjR+YMFyv+NYR zMfOJfDGtdF{T<&p)M6og$}_!@p|m6CZNi;643dcV6s^ZX4%8o{kf-YgC0Bm(Mxlf6c6t2 z6k!6GzoyXwGK&85dj zkr>YwcPlm*ohuqvq%ZnnSZ8Q&a5nrcJX1KYux+7FVMf7|f}I5u3R)BdKvNcgvF$_t z!~7fh7xT~LpU6KAroY?yPx9aA|IN?I7YqCf!V0<;%qZAj@T#DwpmO2J!o!7s3q1`z z4Lc403}uVP6kRP+i#in_D9#1T-+p6`v4v@`$z&RU4i#ROX_n8HFzZfhk+r9MQ}$Pu zDJe=D^^|JIrm*j z;XR#7J?Kr+2dwS;&}AT+MX>VhHF{-xst=S=ilcHv9wOVzcdX;BrLDg#+bvx!4wkp( z4dxzZKXan#ifM&ukSWw8nxc&ljYo`&jYEtr!5UT0=xP*<=Hk5KgyLVt-|+ZX91RA8 zRJ>v-))+mF6^*rx?Ttf?bB𝔳RU+Nk+j`&eYU2%Cy0B)%4Y5FqJa5F;6jXH{UfU zm>n&3EyFD9Eq5)cz%iRxC!xQ{H>-fvHA>!%-uj9hri@h%D4!KUt*cH_kE@ZY2kVIb z7WY{ZtIkL9z2I%6q&}HW&XRbdr=93xdX*+qfAE^G6z&SyuyQ@cW#Sd=M!QMPq)E~) z>4}tvxvhoO9-%p^d5&Jh_S$OTTbiI}w_<37*M|Vwk4t+cJ>bB@s=w|Ci>3Zs#>#F0Ef-YP8Tl++NM!Nx>!#Zj! 
zYqhXd&ou`%voxJF6)|IKG1(Cs4ni)q4jVX@Fj&|UPRD<$XIik#0dMY(AyIJxF`2tv- zr^?eoFE#$uPM0v5i3mtu4$p2uuoY3E= z1$xEI1`p^3CB#2;qCYszMLQD zFVO#+k}{+L=||>}?c@^rK&26h2GGW|ADs=}z6KgWj1r!slht-%KCY?>`dMj(EczKMaVvCa4_cf0&|>nF+#-9)LiDt1Ldp?= zr0}==0^fo@Aboj5UXF`AmAwNm^KQ0;jYG9{U37=np+8xy`c}QJ9#Yq-^VHF5FSWH= zM-5VaR2{NhvXyw{Cm8&mDtDD@81+-iVP&7POWCPxSGFjdl#M0FW@Q^**`w^oyQh?k z$_?ed@?80VPsS)|N}-}CTGd@Gt5#PVsBQ2KBh_i@LUj!~Kc7@@sxQ>()=)P<$n}<0(ggxu0>?=!vG-|;s9*k<54tx+;mKX8O{0P6spYxwQm7BN@ooRw0 zrya>aG6`d_9_#unxlLY@?<9c~5Kf$^FResFX+zouJy3_y33NK0N0-vIbQ9f1ccahL z5qbg{C|AH?aTAvI9x8aA(5Lh{>T=PR5-Z~cj;Hhiy@PjeV(ib;Q}h_>cK2cy*3p$X z*FvnKN$B`JnD(XJXj@2FU0Q<%!FTkg&Qzi@F_28?kzeSg^PJoRqsK|IA1iSk7%=CM z$&kdsq&L`-+mL3YE~!o`lG3;@H)0Qlb(NcWA(#l$cp{JDKcJTo1A#{ZrJejLxYy6~ zQy7u`km4PD8{dlY*~B-X3(P94{pEZGj-`AtjwO6q$zgkSIsS^kSlW&V982(DtMQ4I z_}o^UXFJYzieKU9_(6V>Kjg2G?{N~W=Rd&Pdk^0o&2#xb{)$I(fjAH&vJ3o3Bgj`Z z{2YgSno34sCf=jt+EH=@Gq8}3L+8m*s?wRlRXP#(JwR+~*Uov7Ta?>omt==$x~*D& z-ELh6ZD;WaSVA{ZU-VOah8&nrtUFr`UbR*56uzkSRY!K0rw|>uL*wZH^vvuoJmPv* zgGI7s%mb*;YQ>8=^SMl;_ElP_h3q?TkIaCHYyvdEIKG`#;sfXjai)|ajz>q;JakW8 zFSO$w)IQ8W&uTQfN16|?(~Go^9lyFK`*ioK>08EYxa&M82d809Qyo#~t81(ssTnK! z(H&$FGFC5XMoJ%rO2Te>n2%Mg;4_~lS658d&*nZRqtSp0)j7sR#?7!`QAJw~qYV{{ z2bqt`ghlcMx*VMahY+f^hFyAPU2d*cyuPq&!87!a*q&dxaDLIZ;@ZXY3=Ikc3r`hR zgIE8h_%RsipJR7wm~p77f%(0upV1!tv!&tDPBH`(EiRsAoB%&*pfSVbVf~H%a1#_o zCdx6e)-O?CDjVfRR&MdJuC!9Qqg+-VV;yK&go>N(<^an=%Q{rpuChF{G`0H6-IZl( z8J4bgR`V1$bquuZQni-4OzAI|vu?I9izNS*xx8H#<;PapdS8C6+*iYy3)`u7Qa7SP z`ZzSrFSUaDPcbV2Dp!swot4tcAGxi35EkH_b+6n7x}`6=G!5fpi65PT!;7xK>bXp= z(b+;*VIfV!o=S+|hhFgK(Eqk4dN6JSxA1B4t~5e36qfEPI%?)hLEx_5s(qkqWp^2< ztOx9(jn90N&JWxsE47W0yA!7Kwp(tu!tN-#kW4@ax^TPWy6^b^N_NwLip$qYF>qifVZ9J9j1YFw$+VDm5mIP*`k9oWJLzrsfX!$+e5PI8oqO|vV5L~gmZ(P* zXXPw<4R(~9%Yr=II@vM_-65NrYML4#S{Z5_RXndK({KYE3jFGKL7;NijfLA!$b2iXOU3tUs_am9ue&sW$`{!h7}vTkJ-m;P1iss9qc z6~3SKDs~MQqkrd94?ASje{sF$a>MzY(Zqi}Q+V6iozwe5t~q0#n}W+#5O9v)^X9Wc`X=(K66 zzf-EGI49po{F-nyVL`&0_#d%tW0ps2qTfdz`8)h~!Oz-1JO8-(?e&*hpUZs;`snbX z-+S$c>z~Sgpa17)^tgnKO4&|cCG%DTDf4jby zm(|VP<&ony`%=2|QgyMqm?eh6SFA53XpSRqzk;TY(2V?K+mz0h1;&N&y{{M#8@m}V z8sC^+p&D$k`Mc?aX$QRcVAB-SRI`&cSAMPzLG8V_;3cjU2Z-tLomvRrXgGaM^2t+b z5ynV5ZHn%r{Q$>$PJNwX9d(Wl4i)W}q92VX9xLq}?f*m9CO7-xb~AMjTAg%~cIKgK zh+Ngu0Xai%#pjE{ND4kFNkuFeU1Dq ze2VxmM_(aEBds;r5_7yKPOfRfmc%h(H{(rf5avJA)<^|;~%?-?b zlKUaAN&dIIp?Q&cEeg&81?pp1YZ!t$zL&;Fru*hdOT2ZhoB+LBT5XP=p_kE($W5#c z{pqU>(AC9mwUfgI2UoIr>Hs?EeIqi4sSp ziC2+bWfU@Fea`^Otq*H|1$sF`F-Kn z#h?3rbpC$ttMPNiPxU@{y}S2%z{~c}$uq}iN1unkdi>_$yUQP1eQxk=+mGy@fxp}S z?Gn`_wrl*UgzUsU$=6e>Wtg*^@X z?yovu>P)ZWReM^k-C<+H&WA?T_*#8hwcb@{RNfbOx59ydoU$pU2l?megM99Id3siJ zpW|B7#p1NUG1LBr-65T-xh77a@A&~1s!o(YqrN!B^c^zOtLVI8pux^?19m&VFdWY| z6gDlKRfs-RhD^gA@USe#PHBXxyty{AIzr`F@=C?VWOI=BMLz8tp|vz!Gf+EPciirZ zeG7+u4lNy+|#le+ruvS2x?s>(v5O^9;~%-6MxA=i%;4ys~}reBb)_ zExowRF|Y`-^5ZMEtmF{bGAKECSCznO`>Gd)l&f*aLl1>jsP!Q1 zU+B)zN@3H&CWUUO;SKKPbJe7(g~8o}0t2sA+*!VNxxHY`YFmnfwJuH1eYSaT_gvt9 z(N%OoXDx?ic1?6lGeDXxd?jc3TNb5$SN_R5xwbXf^362MIJWqA(aEAsMU#tK6;&v5 zEGjmn8nO(!qDDpAifD14;-STjiu)HA7Jo5rG1WGEz*3HoHA-V;f#RUPQb!`jJi@n- zL5QfIQXk=)uu@FIj_NV&a5vUOA=c}FE=>=`8sb;s5Ms&=^eU0aG9Ja^)O*Tl_(20L z(WdFfF2x;-^oAz|Yw{Q6^~uf3UYOM{b4JGO^ag41DTk70B@IsOn=n3pP2A?#$>1eP zjePho_V3=mLH|ZazKnVmJt*cQa*8_RxE%XCZV9>!J12ijUYep!?U(u~H7V^*Mp~9J z=Q?anDA28uh6P1ijh!q>vMUQG{|V!ygPNV%FuN5FU%-&m)6K_Yo@XI?kM;Drf|`=1 zeoy?nl#VJ>P+$j)JAWlvf#039T*(s3}!rIrD=iYz|_FKf$syG1tkX7t>jbT zUci8WG66%&4J-Ss%f%z2m(?y@q-Ib^i>e@(WH|k%yIGcNIIg zLp4!iBjGvO0XD8|wY_>>(JMpc2dO>8P>g2m9xGruECo@)P((2XUXIiv zrLfm>5Yf;+{+<_c#=r9+{5boqwpBe;2Xq2DqjXU|%NFZ<%VzU#Q!KEjRqzm|7rit* 
zEL?@TGUgr2Ynt~sw?l4ZPEd|R_TNm`%y${*GR9|A%rK?zPCt;=AT>0_75z)=q>fB& zl^UGtmugITl@gzFBGoDVT82K0WsT0BlszbWRd$&iuiUzMgY%~sbO#r%zhMmONJ|$_ zGrmMO4@WDJljH}=4s{dz!DrDr;s?oHTTRyjHEgBO`*0=pA}=|g1Jl7z?0Px7uS7lj zRF7GnTfCYgV4fxj6Q%gvqcqM~$&!vbhLXJlzD(O!UOyjF@4?Zi&PS2_oN z`wPU|F^F#05%A2(+Kbitwd~n5fOg? zP{{pYcDo7GHIThhhpCc!MhSy-m6mr~WlMX@adQK+z4?b}f+@+k3>b2KV|ww-VyEJd zMLUY>6x}g2Go%-uE1XyuP#9BixnOs}=7Jps#|my1yeRlqkXoQAtX$Z>aCYIt!U~2h zh73a$Fp;h*x?Pl4)S!4@u><0|cw=MJQIm&xjX4uLVP{YypJv^Jy!%ng9mGB8C(fp_ zBsPnSi1MEj5s1w(;KAjEnZiGzo;VX7=AWZ)V+&~nu%e^r-qu_*7RPAK9L+|}0nH`N zSF{lDLAT{<+Hh?pZK3AA=8Wc*CSPOG#A%*suHk>CYdUJWXr^gaYUXQ3Y6faLXnZvZ z(hu~e3IwY_C}J4BMv>yAcu9+znzovHn(}zXRm1RY8ZZqkLr1sgnivh&lmd6iAnhj9 z)85tI(!SN^X&ulZKUVu*`yDY)hBi+7Rr?%>^(yoa?xF1hT)#H1sWkRtqR~IF13J?c zN*{rOc9j@#tma~rutEqIB7wFLdXTgx>4@Qjzzw>EwPCsHX0?j?LK&h^?9?oj8_7A= z13(A;fEh2h)UsI3zs*<73&7RnV>X!HnRc6|VGftz_de4W(LTycnf}S1){&2PvX6SLbc#E(1YB9dHsaw zV=J4@2C>$xD)V4W%~F4$LgtvdQ{AMl1UflP?TS9-P1Pni8mgf{*DB#>3$+Wdup#Ok zb%%OEy@RVsQhk74&S(4CBbLGZfLzW2W9VO?TeW}<9U)P~g*Jh#-Jxc5(ikc16@CgX zsB)SkUJz46z0^b+imJ8q(q|yh7O;ktM|bXeng*~$Ej8^iH?1{|@Jb+N-VXhq)1}|i zGwHOnPMRWh2X5(zotf+63ZRaafH=Mt_6g&Kx;lW?Mk z-2DXlw2e>a-FYaODU2+dy=0fzK43NTfVK>0{aFv(Q76_Fzk9LXYyca{CWBjQAzR6I zuoFNdZ?h*r_>z$UBXUW*YSXhj1kiR;>?aKirapD3u!EPr3+53T=iI?mlyM+~Y2H4VJ>{IM#d$5i+ zvyE&$j&+!kHF&IJ8}P~&yt9WL!0eu87uYqdv`6d}r1dAx6_2^k!+_#o(jv3w$*3Iu%~Us!^VE(bQc8tCW-;Hz74Yy(ER6Da8(SfE4v zDEQHiW1XFW<+}vj^aj6Of|EYtPx(ur@$dO3{uOuq7b`LWNNx@gT{HX+31i|3Z=@pD zcrA=mYtjWc(O}5@6j+Q!WCccZCs;+!02jN3(SC)w_>DeONti(cF%yLdz*XHaw_ZTG z%VM4bX)vuuYXWNxqqVR);FK^bx+} zJicukS&bQ-O~ymwx{#JYq^m*-y>P7(u>$!`#2WvEU6uR5pU(h$-iz^A1qqu8e0~h@ zt*+RKZ^4@av#)`X4aOsgm*=HR4u8z88%HR^9XQMj4m_~b-L(ReZw~gOSXsYSZQ%lJj zvI%mr6XUub@2$b_gJ7~aNlxQ90#=0eWHThkhLrDvj{V=U7k`~07cn2l;V13FtNX|S zJljb&mdMw9NZ4!~wr?4QdFq4v>r47$hpQ*{5!&G@TS5ahB6Ubj%wDCE`ExAUO|W2Z zx&XQ{iO1qTzhgh)J@x?KVx2z0jNUETSGWQVd=BGx1f#eE61t{j7a@Yr$9PQvWBXXl z!U*_xgZO~|JNjX-sW0?*f6Tw_u)RJI^EL#>P(B^eRSI)m26I-1lqW&hEeeFZ1;S2L$0HD9Q?i%75o*5_o`qoq;kpG zgYnFEmMSIhR)a3AQSx3e{lw}jNR#?cB#TgX7i z5;ZYIJ^ z|Hda{aE1h&FRA2g$yh%H7@uMsHf@aLVIt$3&4^O$F*a6=qJgV;UBUn5@I3s?#b?rS zfF=QFNyK^L5bylKy8MIRG5D_UC0Fnr-}oIX_7k+p`x5zbiN4Aw5fYsrR-_4LOXQAjC6+<8%riazYoQG9r4}(ykhg8+Lg>%Gt6%t z%vu*5&2iR195r!%n})1}@6=;u*sQr066T8)qhL(3F@8FXUIrvH8!IvwBa~LM9<%XU zUdal~$6cmi3~g&r#(k+6XNJ2g;+(r-wfo`@Rs5#_xkV;n$#+nkUB)^vj=^*Oh`SnrV-K&qVR}0}~H^!_KV}zezBzNF*$1tv!;0HXys?Oqe zknY~Fd0X+{4^W|I!rreRcETzkPl1zkc$|OWL7sz69DtQt31efz+JDDy!qV9MzzB@< z7M#_#1HA_$d=r}g4`w(Yqo-grZJJ#RDX^`d0(|m4Y{WUt;~vc7;u7CrXbI{t7FK3v ziSM=))@2{OvRm*7UO*mWAiK8pV?%v3IG2EP*}O1!tT~&6*`C?-ttZxb8Awjt81+v$mM9jBy&AM{E1ahl{@?ap zt?|4QK5vusHu$zS_^S)#xg}<)Zi%i9f%JxB4b;Nb)hf~2jiI-jl{{MD^R`blhoswn zHo*VX!2N{cN^JguKd!ubSScKWPD(+>dKX%SUc&l50uQwr^5BjG&&fc>gopZ9;mSmy{lC?x zYLwcUC9?ild7RH7al{dkmj`s@KKdK=RBz}mIukK>IDHD~xdtmT6j@i{>>fTH%%&kT ztv8T^qwF>AZ5Xct-6+x@`1V=UQK&5lLOig5G~}TTqGh1BpFn>e#htYxpD>oa5Qm<_ zD%0T(Em$GTF^?Sx#Wxny0C22b0j6*t*y#zOkua9NM09Z(al>ZB73UGPU&nmLmS9+k zJOmP+2VHm(kA1kpj<9a2uu0dTJ9j`rJHap5No3N8wx_X(FnW33N$;R$;kBafP6e4%Yi_gq`i?PIRUn(1}s@ybpKutjCdhfGHW1%bPX69Cy8go z{orQ2E1VR*pt`@0a1W?fBs5|)R$&CH&jU!$O%=oyu%@ff=>iZaXtU*rQRCQK!n{(Z>;fWFJgV`+PL8tSB%P@d=zs?~$xHE1KzWn-1JV`QgJEDeK zfh3-z;p8BnP9{TcgZKt@t@;Q1e?NgHH6gwDLdep5ejHfRQs_?$pGt~JCpr>qx+}2z zao`_b$`3IM8;h*G<}{u=BJZy$bY3uRNPa^Xy#(4jg-@qvuwN5G<4IE>#$L#@oGFYK zV}w5>3K^#XydiW*2y}=WY046iEAPp70~t+(L^=Y=dCM=6nRE;Ea6GP~5j@y`=xxv# zeDQ1GoqdIc0BNSuO8typYtPAJ-T}-I^VD(za0EG|sZ^xid^;)wDq<#e^b;G2T;&VA9a#r!HwS!@eqg5Bj^nF@&tf;BKQ}6U zmF}t&zmMF?4zNjeXegNvTVqeR3bDdgng~0zk2FRPkmtf2Y5?9g598R5*u0i%^gMK8 
zGjzv&uGHYy>13e`UC!$=u4b?$$Tg`W>;(RNm`Y-TI8JPVE)R8tPU0lVMKeUorwTi- zKEVHdgyy=2HJqi~Qfu<(7?CyP8$U?K3O?dOYG&1#j=U6Zhyj9y@8W^bIDVKV0;%jv zH_#~Vjy)ahvmpoR5PAzvW`1NG?8{%KRlg{UnFh16lRENl*!^ip3h`Ge+Ttkz~|btcx4heKj-mOXQw=L(rZ?z+*VpK&g02M+C!)%x``L59+gLP z=r-|}sH3};NXsaB6p4|(N-}S5^)`>Uj$togLqD-PSizH7Pds13ekg}o8_{1oL3CDs zo99^H^5x<)VFIrJOYxBHhBr8ZKY{m<$r{l%;yz(H?*#32iTa5KvR2)#gdo47fjE>L zRRZOUN?)D@6u$`Q+FFk21D$iW3gv_v^enQU=g=4A7E4q*sin{*x{+9sNJ^~bqV>Dt zi~P=us=|)bouadNoJ6zzJcl}pZRuxqkUUN9*%8k8;8 zOnDkm_}Qq2YsS`z@3dJ`J3ayoh6c5qcvz!|dNKo1YB5%ej_*g+`Ay)I+m#vSn`VQ$ z0=($2nAJSa9IbAWo@n*rSoNVbR`I5F#Wb1%K0BkBquC3!hk4YA7 zL>eh|EhjB|lqoz7jBHl2h%ZrRD9K8WdX|M_oe$#&)cx{0*{psft7%yx0hRDT*8M#m zrz}_Qurl;5q-quXk7d}s9Dqo?Jnurci{~*D26=<^nR-uf*SLy@m=lRXWyF&ZtvSvy-#hH&4MnLCm1J?W%zIixoUb4^@9V{0LM-bVC zlK(hC^gN4p7cNsj@&w+^b`mQr7T?iG7)HB#*+`YeiK+uOzH<(MXS{v5eM>*D&Zr6g(nd`7{0UlBGeyrrEfHk zrRuz$Y)}?pMm|7WPUJ7(&1R^p6(_Dk_s>D z7HAJ@6KN0mp!u(|SZZRoM>?z4wAw2@k)QkzQQ}F&Lsx-s-v#oM2T3_9JVJ%kUGiG? zmnZS{(jv_Sx=48@Hz38Dzq;b7VX332dm2hjTB1KJ64zLM`02TM~)rrbfX(y6)@x^Q8)ngPy}Ds;J!A)w}g zZB-IfduaHtu%zv1Q*>&}Q46gu$`c+U?4`%~X~c6axeqY@XCxGRkLQq!-Jh&Lwby=r zjAjX$^csJG8hHbqBQ&AMuw!$IcY_w}!&<9l*;8%S1ZdT6SV z|CHP8t5{F7m|E27{Db&j3MFr?XRQi{| zpV1eJtKobx`9@XYoiLG1Ro`G$bQ5X`*{rT&ujcT4_y!Zzs{A6lVE!OI)YnQNUx?T< znb&41Y5-S=7TWk9tk5d;F2?jHW3sn`9S+D+x^marPyI_zi92aH3sut5r(>tOp6AmZ z;zZ#j*Qp1TXUtA$AsNJ~bfdaM&QTJ`B_$arBtLpr843~CD+*8 z9LMrSy%@t*Vf1<;_P)jEk$$KYdWv1@T8IbVu*zzVx(YqL8nf2cnpW5)bWyFq2f-_k zg0`BczT+K)=0Y!iK<;cAVRdn&O*Be1ODFjx$q`!$ z6U!AWban|m)ct^22azkRIWkx?sXKD}%8+JY5I@a2 z)9cVAZpb-m3S4bB8=0sn3K{;syRezOS^Uc2Y%Q7^|w*H&E% zt1n5q6XJSxrM!wOVwRAH$gD2?rH$9jVNXnpP1lr$LT^;I{N{ehbgW5xqHlOhVAtv5 zeyz83MNP4UtJTm&zozJhC|ne%RF#gxy7@uA3X!Cb+EzKj=FkOn56hL$C_l+?%`2@e zj^OVVZ`OnU1x~S)JCjjD8SyeXhb)*MWRrMRaN=W>0qRpyEF7h(8X-4?l`Dm)=`}po zp@@$QVbi#JKs^XMbDy53GZ6o@CXA*b-~1MTk6ywibP}GzM<{RPS!|4OS-LBZA+y!> z>N#>9)q8D8ygHkeB}u##d!x!cTqvOSq#@r%E(s~9Ci(-3e2bWN0O>7U78i)`|t=t97+DWpK_rrU7VFWk>{e?WuDCsc^Q|yRV(_U)9iY#RmSK&X+MezZz#Wc{K zs4HVNQM0&#Buk~Wi-pHZC+kc#O!z1yvt#lWWgyVG(JW1U#vdc5x*Yq89{V8%v zOojd3#_FqCe5kkw)-Z}S1NxWEwyBT~%VONHMtgwH`Yx?!Y~m_nPgrb>}A4p_r; zW(WJ2gBWQGCE_kOW z>#g+#Kb=v!%vM<+%AUkuOchE|XJFSkK%#Q^Y{arU%G5i`Se_t`(nQiv%5OQGcxnDf zD|o)OjnYb(udO9*Q72nEDbvNJIxlH~(#w2Kel4`u`ipbbO!)=>DV7#Bh}-*-N9c=_ zf_$-XAxPUzD$g!j->R*|2GUDzk-w^sQ7=)AI1AEA7>` zbeHr<_^h0>3}?Q&ANDy^G;JzwL)JUaby&t+j4!M?nh*9lLQ}bn5+i)njHcz45YkKclH@HcglVH?CW5-5v7FTgCvnK-t@JnNN zkVFG0Ntk6T6LHb)~Sa$L@+UeT!PD|n*}egc2p=ZB?X<`AE{+<_0d$v8l#5!CvS?HuhD#frKRN~ z+l^YIdg@c_DW=z?Ymd-g)>>w+tQJRVMp0K~wsnBo4LyRck{`${Kh6?p29V9`EE1WF z3z)xHLAp%lV|BZe^3q;$3v;o?SO=lGi=Xs|-M1c*Kk+ewkz7;9DK*(y}h@j617@Rz45-{E6!6mCk-slC$Q za!wg9bdlDQRM|)QKmw$%bf1!FO<=x=MmG^zo-2FsCSoUPF8tsLiU%JK#4wx;0N(C} z9j%v$9lMbIh)qLSJ)CDZ4N_YwpLv8hOl(MA0l^#!oTd&?`_ZHeP{L$jH>ZIlwq#Su zF`+uHxfk{#f1_HZtx}Esr5}Z1h{5Ob)^s$zj7r@9=y}o}KIL5?osWS%SE0XvNu(nJ z>kFT?p0u9m)d%W5@(0MHKud}3#9zFW+LTWeW5jo49uEhqUz+KZ20R`paxSY6_VdO- zl!C#tcK}iE9X=a=@*p6(VW{<81@2#F2g*iupm0-jUpOnzHE&cuYXa=%3Q4F;-^a&@?$T3W zdZO9_bx(VRI?`8CSGB6m(OY{2Dom|pE2`}K!^i3j2^hp4sBcLRsgKx+ot0a0)QF3B zRWGFxYE7pJ`SKaLrEo=CM*O0LD0ApEM7?1=8Jk~yIgj{hwhPYc zQ{}ASsH-V`SJ6|NH9-)TSlFtv@M?D`chwET9qBolDXVgK;knjDGO6d~ov>;b1veVPFREkJ2)+Q>pdzoL zlm=^37qKZHBac>=lPN-5a#9_~w$X3GEV4;mu5RR?;pOb7@2JR&t&`>3^sIKV7%4Zl z#H*Wx<8(4R!>-b8f-^~0pRtdm0c}El@Tx#f>mg(G54Lu_#3*HvWv-ea9+AfLoAOe% z4Qb3>)g(4R94750VWO2GfNDPlu%Bd5{X5lVYj<@oN?ZxiNYnH5ps+{$q z<$;@c@$Pa2iX@>o`v8WV( zz-p+Ym@}OujG}35gAyy3Vxxo{shiMFakczEw%!6vs-xQ)t~$~-&M-KGy9IZL;K3b& z2ZB4n-4op1C3tXmm*DO;$oNd#k*a^yoVPFUz286eOzY|HBUQV0?Y-98Lyeqrbz4#S 
[GIT binary patch payload: base85-encoded data omitted, not human-readable]
zI%P&(MQ(aAs}cC*%edM}L@~zb8@-qgM;H2F*yL*1TNu#B9PCg+@N74F9K8ybwHy1G zctoPlVZV*E1tQ3Sn1gpK(*CIqW? z1bnTY=?%Q@6Rcu!*ssL1DiI53z`G{Fx^;-o&tpaoqiKT#{La;28CcWB*oDph6X)-y zPtZ~z&E8@ax{TGlFJp@TafMG}fdjb%@u?H>1@os9cin~lYFKWtG$e!hny3%J`GnzW zm(snEZ90Pe`w4>|qW$TZM~OJMTw*F}QAe>Wx`Dlr5wfvP5~$23`XxO?(cZ4sa(wB z5!gpO#jEz?KM2%E*J9?qAeIqYta{%O+swi$KL}C4YQ&Bs5mjtQEc>2eBSv>e{Bi;Q zXArm?fs6w_XCoq2Tbx5W;f9#76KC`tBjbx%w;QT@*<=?o0S(w?9zyOe1G)jn=}5+v zXeBPNrjj)3t2Wdts1KjTD_h7^@*pw>{mE(MT5!;Fn$(;N!Y+`;eOb^?_I_K+8KnS6knr;K_>WxvkZs z)l1(`@2Z_qRzXl@| zYq*3y=8Nb<@B}mH%d{6PZX@H!lwn;Pjo+4Frk%pf8xOnQMwAc}FoTvOS1|*&vlE%D zxyUwHgRy*L_LH;-p z+@%z7GyC9PU4iPse0cfyEG}yqe8?~`vS#9^77@Z4#)B!r+Px1R>@;$3o^*;K)eu9U z!YcpX;Akko^-Ka=F;_?89;E07>y%m@lq~IaPT;Cl>iX&bLCxW@zFGf4@2WqnTZ%02 zRs6*T+@TR*$t%H3Qfd-`e*Fv{l9jd(SW4Zry-}Me(Z(R#KVB!$UDHMYTXI^nMH3Hn zY>Flc%$Zota?O6tYfZN1Ik;$_!C>=M_fangw=GqzQJZMA>I^W}lGJ73D>kY#)sNLX z)svvT5Ud`7?DTbYu9~X})P!nmHMCld9JNSesxj7(;OFGxKdZrd?W@_KxdvSSR1HT{ zjaQGvC;o%)b2W#-ubZjy)|f-Zqf&hhb*cjO5Y0*Ox_5vT_qS#e&S;Wm6F8bmjgNLX zm{$|Qpc<>)rv0kbYK?J4I&F)#97mVdS{DiqRg|u;ZWh?$$6!~ZbtRu&;pLsM_h<3As}q2{=}Gbq`4oDL$B;+8j~wiDFt(@TIEjpT5;FW9WEp9K zI3)^tEf=W>_@+#DHQSxDp0kx>$=MF=ho0;MY%b6bf5C^GMZSI~v_SfB26Kjh6TX9U z9!QlroQ<3c&J^xl=m)&y9!G6%I*xH*h4N6FpAV!DhC*~Xk-zFB^4w`LX8*}~#-q=Uf7EZCf*-Gje<&Qc0hMWq&dWe0B zeVBb8iX@BJS=1zGL`6{?>LGc7bb_B9$5v5+)CuIK<5BTT!djvNuOtZBCu1;>(^$r2 z7xIwb!C$f>bAbIg0k+TwvH(%rPaybLBFtYZ$q zw@kr0`ZsAw9%iK?>r)C2*h}1>$*2zpp+>rlY=`HJK(2<(dVrWa1asglvU3Eh3#;H? z*k?Lo_3n%K<}u!FD=ak*f0;m}AYb)+-lQ@g2r)ST$e9|j!B_=3zJL07Z`YhdpE(4c0)xbftdc&YW4aEa>SE0TDRk!6}$HsyCKUn9XGlwU5 zrH#=(08`f-o`|a%r1_~?18$|Oj?(?m?nAA<7Ci52aB^>{TvYYo9KBM-tLJDm8fWbn zjaV~LT?}o28({0}R6EsC;QxAR`)bF-tIpQac-O6JfIm>(%~U&ShQTMbY7T3?!Pnf5 z^O&dm2cD+^O5Bri4nE-Y#A>C$F>iw|K_@t#Em{jy=6dOd>4w86x$A-Cqqzz`a3pj6|*m`z1&OnY1 zx&lF{g6&DQw(0nAJU=JRCC_)Fke9mQH#!WjZaFPFOq z3Lr^faUNuQuus9B{-x@vsi^(!=gh*K?#-pyA1O=By-;A)mQrV__wYp1ozyhUpSPGB=dn{N#=JU>OvZmq18Nxy4DX=^BLNHgH+oL4e{FCDkIDmm zkBYUAG#8)&^hy<>=3|XeYf3dwG?z8IQ77%D=BYXrlND1G9~6Gdb;|F`5cD}Yp^8x5 zRYobh6io^T<#Od=g(WwsE#nMa!bFpV}_4qbO| zvqz?ZrrA(x3otojOd8*Z0?a|9RZv~@0gt6W;N-T#2=w1G;cw%KfQl(c1^+Gg1T-A_ zbF38bY;N^bC z>MX+HrFGYqYo=*#t8>BOuvKnXL@6l6b9rxhV^?FBtGut=PF~#gvg<$>uggO=99XDk z;2N4b&vlG#Uj)3%g4U{*B`x7C-C9Pq^l33}DQkY#ytpOOJ-jk$Ab7UX7 zw#tJP^@_L3i>hsy^`o^t!EG$jUjb@1p1DT6U?r2ks2A*Iz=d4`Yb}{K2nd5mf+Q#n zmx|9Dg&V&y_5=Q@6Z_1Yf6Lp3+1*!{N zC~?^HLV3gRs2{X?j)QMcbECf=F0s1Mrt=cDu=mOMpRmYV-6~4f;EtAt-BfDnH zR!H|qj&v^WaA~h-ecN)bc|+5N#&-=-4L|B<)OXhHt|RIu)E=*?tsYi=rs`GYi;BeZ z?`7vpqe~=3UPXC@>4hOh3yb%Zc9d6EdRFsm9zZ+M6Ww-}S52#WT4i0m24wn-nlH7d z>-yCT8hSQXHx;$$+PZgcl3wrHrBEsBRV&mD>Z_WY+E2P{z0B|zBV_#`XR-4+6M6Og zrNU358ZiMS(c`8Hv#AzImTuPTY(Cmb?MR0ThXanzPFI{7ol2cfI8Aq&>2%WRr<1~I zw6nzdg-cJ@#ja~zZ@MI*zC4` zU{wnZ%}*8sEz-@GncJE_HZwCDWV*m)hw*Wv(_&~L3*`cPK>&XsZ$Fpd9_9FRs-e@N z0oFGZd%3}gkv;*_aFR0s>Xu936>|}(y@BFIF6!y?5!?MoH2_}`0QJ0BXeBg5BPf=W z16J#3ZZBXXK0;BWjXjfHjR-9s#~$odo>6*4YyNC0_G)_(8Eq$@A{V<7SSAB)P7gL5 z(LdC!*LrI#)Z~2}ZGEFicXNt2MbC^8GLS50&l20Z7 zluRn-6n!bk%%7M4GC#ZEMA5C17iDKFCRWL+*F*huO~c7dgDQcd_4NSBfrTBWxB}&$WuQEHEDduG%}JR8fl{ivNjw zk@J&15bCHb)cj~^fkycoz1S`>6I=y}kCpw^%r!9gK6Ls~;(LQjWQhSr5X4xJk+ z4BZfNAb4UBC-A-hGCvF7AKvGH1Rv(1al7U^#HGRMjN@Sk3DC#G>>t=A+g`D8v$nT9 zVU+P9x zr7~HuPTnpHlP>Li+AeMNZkgTmrD0@!RqgT`vie2kzZH(<_ew{Vcod5;!-nP;$kLEAA)?TCq2t3+!&Zi` zkGLIqILbVFVf2IOXVJr=&qg&xj*2`Su{(TZm?-pfaQ~of0k8bLe1CZ_@sfL7a^LH= z&9xsGPjejmfEDa*C$#0+m{_-2F0$BQ*3TqM+yfn|ygA0yY?d8(nK$S$$itp7gd1A) z50D>9K|JoQ(`rwmi$t(yr@BcsT=iTzTp6w0q7))tTmqeeYOtN6bx(B0`sL8b*kG_j zjb$|I54VYN*c}fcF9CJrOHBc4><)G--QjurbILiZxGHWOu*E@8L9*bdLNP+aoek!| 
z2lglGKDnONoe+X0YHb*+zo+xky+aP>iKYg8)>q0=ikDq1*+|KOj(cq{Tk@M68t2q+ zL9d*&$^{icS*AsXYnn@{OG@bX{a^PduTJ*=dgj~D zA4%xBaXu?FN0HM%SDb5+b1Z9GW@sim%PpIqL*|~!ODyOpDlAc#sVehpTN@v>`gIfT6-?h1;br05A_60`B6mi0i+&#cA$nD`eRNvX?5H1+K9SEOCP$=% zSBJd|-3s2}mB5ewNxs$I9$sTS#<`7j`Pb=(Ltp!LbkIbHV(T0$#?s1CVZOoan#pOS zt)jmLPQ0J&9poZn8=b5luIr(_jNH#OWQRI|S)Qo1(7*{~9Mn_A`xv6D3)yGlzX*Jo<)x4_RMmk6_MKe%eVNlSEh)D7+mCSDERPhc9 zdl(%usWf|J*={3rxb38L8R7oiv(EdY?>v8p!1kc4A!cF2!)HW9M81nuM%qV>i8>xt z8WkFSBDyYG5&bCIEqY1R!N{Ev%ffersX{-6o(~-rnjV6g96UDYQ$V)gJDz{Q9dglIo$$LeZyXZ5j$ z@mM=ia{(gt9&#}+pnkrQy_z$XJDN9{?=LtY*a>|2L_rjk>K^g0fa#XPzs_F)Dr;1F4a5b0mTyew5~ofj`VJ4Oh-!F zqE_3MjHXQ((}KDkwWc+TsxDUKmqnCbEmjovD7c;XZ|?S-wb?dV2Qu8!n||$1y_z!X z`;4z97}Y*s)_ggVoc8T{O3*K-^z4ian5hjp$vNk8B63&f?a#L^xKNN#II;M2$q00h zx2h0U_p2M$xW8q0M~5s!c}LTtn?_3s9yNiD3_S0rAVO?s+-x$y+}0||rois5!z(A! z)x|x}W1&|spOJn60Z#+%g5QM94!aY+G{OXE`M(&MD3z%j z>Vrz7>ISVd9#omuYki@=w^#ocGTkgxO(uCvX-$zeG0+zUG3Tcj_c+lWn+D>s^-Ex0cVGbYStTf|ry*^INh=`ho2mh%voC$9gxvpoaQ-H-I` z@Y^1+Hn4Zl)ZnKfpF<~x{S1o_-w>gVNROBR*6E}0`mlAOwZYGzxUe?ht)D3v-s`-d zdhPXG?D5t;%6*JmsmmZ|mJ{Ex#(uKh4Vy@7W2;{l_U4|ZqL}r1z*lhC@GQh5$k=Za91oP#P1Y51tc)*X|uro=7H=<1i5iL0`I6 zbW5~GG(uzx-quyUh$ z^pu--HAr7z-+83tVS7qjPHSe%&*t|{7aG?!%&wnPH@kL3ja#**vbf?_`RKBy5_C~6 zDl3!~un{MH%=1Sdow+%WvWI1tWwEnzDU0X==i+)#-CHnXJh3}t_-M)0vf6S4)s@q$MPOL{Q{SWEU1MW&|F+p3YbABE3kr92Z>^WU2OUcs zBA2uO;-26)3JIeM<66^E79Xsh+Qiu9+P`vKkg{{Ry7um=I2c}nM^QF7Y`B*MK6U#f?NDIyf!dlOgN`0J5mR< z(mLh?s{KwF>F%`B@CIxEE_8`Tqi&W#&jNO=7u3t10IxHSbp*)&0%R1AkS?e{M6i=F zwzgO^J$XBMt-Sf*ZcG*Y6pR)&33rM3;-%uZVvRTyz4Deoxq7Cpcy28!thonT1N@6`=Q7v8bz;VPl>z5Gzu z9@!@8Ov#AOxgD3<)7xaNzOAcTPBm|9S_^IdJ`I=aOX^0{3F=`O zkzF2ME-5=)mQb2jGO{GPq_fz$xK~kaVN>Dp!Y2jQ`S0@U^WW$9&+nBVTJQv2&u zD~T=>R`jV%s2o+bw^~^1Sofmtzxtepu}!m^-CA5*&$o5!INMnwSs_~}pQAKVpVUm$ zbu;AC6cNp8BWZRGcPXDObP%sLo^E>D?4mixGQrBrMs0J!_L`mA-o){YV}oOyQ*Y-| zXAhSJE-cp;*H3Q4-0!=4cwF}=^LXnK=P|*fr-#ISwtI%#5;se?eXa>EQs>FeU!2+< zGaO#o7ui|bIoh^cr=#<@v!y%wCTE$pK=Wdi@pW`)Tq!mcUlJL>e%vZ}1rCfQZ#wrr zST3vBB$O`(;||Y3CYXjk(Md2_w*YBPL5X)dxE$WpR;cl3P)?|sw6XocrTB)L%V};V zR|ZDUU%XRb^W^Zlc!5v?e*zuYcD|Wlgy4|iE7WC$;Al+}&V_bhPobaCTG%GIiB*^b z<%S6U4W13}Ec9^paTMsaqd@LA6?Lu?!2KS=4!H+&3)qWv!$|CTNPVnM0>$0~niuMJ zRg5ZDc^0`PzI+qduQO$Dq^{Cik`T$=PAhb3sBe#MkB3%!ee2uSnXR=gu`LcQufdr7 z+_WAY=#`C28g4Q8%^Dw(fE*ReP*vLCu&NcFpc;rs`moUDf5v@JeY# zWrd;Q?@B>cdsTV$_nK$5SL(LZ_iKK{%ncNG7XfG_GRo@=z$_&<|a(^<$8x>>L+ z%Pj9&O|`DKo^12fMsL#xUCLje~dTgk8DqBfO%! 
zZKKUvn^x=5)>o`*EPX83q8o>od7Rl7Q#15kx?r4TBr=*RekL*#trbcHd%%&*L(IeB z-{g(q5xfM{@@H~~AP49J22B~bGOIYfVG(KUN60IDVyB^wSH@=8LC8tZ<80#GL_SFc zWyoRNgWMn7Iz+|48C^ZV=^Bk*R~LD2pz`{jm&=o2=6`{AzmG@f@JY5jD==p>v8VOn zI)lCQ7IgxOBL~lJ3i~Va{QpuD(EZH>>IRol38jI;tww&Nf%d1*1EbS`o!SH46zF>Y z(wb{$XxJJZ>c9WP-Zx!EsD(q~<~eiEw0C+Ses)$)l)thMAQY5==CheSQI7f;Pg;;$0~ z2sOe6QM!1ak*)Cwbh+MQQe_fsx(0gvI@7-BUG=~$-K^fM3w+uxvof<+X8X)$n+-FI zG_yAAK*y`crWeuoXqu@Hx_o{$*@-!yhpwJ3#!2wBP2!DWGw~zQL=hhv?pMH@94a&y zRtR1TE+8sii;Qs}fgf0PY;dA)A#dEBFXlI4KBr;~AA_s65DEiPQ1li7jLpVZE%ym` z2l&vNpc=f3yAS;xUUHMbpt}o>-!HC&s|VxuEEM4TB7-4BP4)yB)%(H0nhB1VC$g85 zI6j;&s1TX48>qxT*|AuRzY6t42U5>cf$uSr^$C^10YCyd6IDQES}^D6P`bnL9qg8F z;MhJvZT=jTSUXVdsYbo$2y!qNwY{Ly9Iu%J{6MrO54yTPpz!XhN`w+~u(BI;wtbcL zipz>&iuZC2GSX66r7RyD+r=^~*#l{q^owLQIF-%l*${}XK6g4cgK4|I<6cL0$M=ri z9TDjN@~C}zdpC50aBBBx_iGPp4{f(=*R*NdqT4UGn{;gNV0T{bjD}*7NV*;hRPSXI zq5VBp-Y&nW=mzz|C{?QJU$w2~rDlqjr%TqY)7ye0*c}+lWk50=C2XP9Jd*4n_fnSZ zo9uqb=>3P;wVYSN8^{02?Z>;U&f1$53 z8@*>E(LK^%JWxCu>{&BJ!_A^B(QVOY)C%T`ri!9P98o6JD(9n@sX08tSFmOW3%G(` z`~#>J{2x_c9oN+N|9|hgH`v(d5Cl;X5mB+b+qZ@K*0_8JPoVkS4?5tTcmePQ*#S+2VZ5olckqEe%gN<%xR>Au?Z#QpIRwv$_E7ir zMs@H5IAT*+7BFtt1F6{_wZHkqbRZAQnSY2UOiTEW*V*?$qx3V7eJ_9?YG)q-3{eSc zsBfX%EVE@;+gJ~RX_*Ht<|r^Gr(ot73HATdx`n9RQFWE3#&CqO;&^>GJ}|B^dc*DM zrr{YpJBAoohDv<}^!&G?73&AXHRQS8L!Sp%k38LaomdwG&xh@`(`wh%7S)b}3gT7W z5!~NjS6sWHb_O1U5|PeR*GczQw?t1Ex*4V$?i+%@zHV%KVp>r*-W*VWquzvC@^jOYG`DuoYR6946%mjP^5bId*_Hb`Cq2U4uBJJ$nK8dr_!V z*RzfS%Miip1!n$!AhpJjlfem^4}3u=_!Jv}V`c*#Ivl>mPwY*>jB};B+JoT?aL>Nn zHqiRfdfNVpdS;(ub+^2lGj22H8Q&P?#^r`>hBCui_&H4Zef<0q|eYr z>xyvn|JHTaJVIS00RCKL=3Ost=R=mQL~UC9}TSzG4C>bMHV0)iRL~B!f_-tT$QLucL%>W3H7g1 zsJ%K^`$O3hb~@`4cnSN#2Ymp&)q0@2Elh8qzE=QAItU2f0mMTlf#%Uyp>x|CXiy~` zV_$D`0XzB=v}HYPzF1v;fWLpq`pM#B`B9$l^E(dLHx&Pv|^! z&uaH!?&w=t$AR}b1~}l$iHX+V_ca(Zx&L;uMMy?UMJ$v?#x!^`89Km=y}RN+5}4Q3*0JTJT^yehnlIU!CM zBYcUu;w6+nYJ_r(nNT=_a6|_DRSaK{FL*pg_yjXclrRij{TdweLIF$I7wW~uomhboAiNtQtt~tnW_3pou6(~ZGWgZ6luq4725vV6m2B*b>~89 zcYiIXwz2kT&4U`TmabV^W2+`>#?~A}eA2%rsK&RZbItx5fp(Sloz|*dP}@vbsOtmw znK=DP!+AJOWtdvSYi6>!%si&vT5qz*Y-P43cC-B@b&;L`Mqf|xU}MPDtWs$IpJ4mJ zX(1A`@O&Q48w{2HlYE}wu)r<|5srjkr?+SaPzJRkcPL)3fTPT5@fEaV;$7k+(C3JS zN=H6+`0XX5B_kxg(FS5y?=Gnl$BFO2OD0lm5JkiD@d$D_-9#R6eJmAbVeToz9x)v% z^EWWccEe4i3H-yP@DnMAYQrwHtMHph!JaY+|Ffa$aaS-+;02dJj=&o`$iYyI_$qL$ zKJVZ>F-z<(z4%@YWWV#>)d*$`VwVDN8)y~!FRa2|%l_`~9EAK(wWohNy%EpzK zD#lctL#ClyWp%~t3a`pXmARGsD*IQ?geTd-N|(wL6?3rPom|nlf(K+wLB+q7S(Tlt zGOM0epQst6P1i1|9idyJe`0uQTww}FMWk!}B+G2;3fm34jtT+>T?3u=c-C?DNN6&= z2Rf^Qx1Qfc;3X^(t`QO9Gf=yYl_;g7q%WmyWw&KY`F41XtyVl&e1W@|R>6b&Zj$1- zVydEzf&pe_n|z2|A^$8}A!{wGlfIVjhY#pDX&ZRDzK|@HgiC70cf||D5m0q`En16F zArhrwrdfelr73h=1n~Dv7u*so7xWbb2|Vz>pJ18bBJ%dt0vE(3gMeWLe*&CXXK&C%ZP7CW=Y%0tg`F!-ikuSV(dicz*}&kvQpVqHB+@rwOMsh zm86on^oJ+&BA1>nCU{NysZJ~T%C+!=KIx=#nyI)YH_Il-Qlx#QuQ5MlidT!JaR2rY zy~B}finU=I&gn*i3b@5g#o3t$m7cE9`CtQ7Y2pf@kJc03avo6sh~%x}y@D23Q|v}| zLQf_Z`6oH{E^5RVO|Y-A3w1(^&{yOu$`T$y&Pk4_CK|{62afy{ae;W4NpUaLH(}n_}oddOekZ?D%2ScfUXbN@9(MC*2LpDInE<)enS^ z@Hz8%vl-rFk4#p0(7iN-8P4jWT0hjHw(A{pV@r_sZs#4J~si z%PlP{Rh3btvr3Pa{3`BPeCYSrB2Cea!hwbVLE(D^{Dnv6f6Xh;9i1DMyD_(A?$Vrs z>~GnX*{`#kW?#%&lXWxeXI4d42h3sM6<-z>^jFS(tWbW zde3fNFl#s&Uc+YDMX4$ zc{^EiX+Oz+@i$SrFji2`UkH7s8s2OmyiY)X?HtzXy&MBnS#Cm;xvuJOa+LUb21ey;F}CT~VaUwX!y{ z_tI%nEq2@w#qHsVwp(OEM6+JNKn;$??~STq7N;{Z)o)okc#?~<>QLw6)h^}ly@kTpz8Qf@q*tYiY67lFBnu1lixi5YF<#@(%hXn zPjIZyW?jh~moYLuKkY0u{nWq4{#5;}P92{*E9G2rf9N)DPKr-_lbDh8U&@uAtJ9um z+{)(WcPr{xGO+w{)!^DEhMRSRExEQOw2_Elcj9{T%LLa%gCxn)Q29DVk<(12Nwq{x zX&$&O^pJV2_0IEYOC$el&78gDdxyK$u>eOGYMW|vjtNABW?Qs;6iU 
zYHn&iYhG&RX!PpY>ZdO6R4ru0&I=QNC)x}Eh z%C}ha?d4<3Pr`HBt+Y*vs`yRO$U>svYu>fo9XX4#7iJyI{E|_ZZcqFD>(|f9RAFjK z@;^y8e>f+^e|r?a`|J3)d0&VxiZAoN_{DAhn*1&IN7s~hzh-4N&V5!OEFM^PpmIkI zSNFHUyRMyuv2p2FjFx=M8OPTMr;3J3LgXChI^}$qEt*;Ga<3TgX1>0SCI^%@UK8ja z^q|T4rWwsTHUHK8uNKqE^0>JrdHI%8+! zL&FBce#0XJ*Z9)-(-devUf;r+ZktMdqJ4=i8oaMkWYjh$qtzX`03fuhR;Ce?J`igdqg{hV+9lWSD>8xhO>=* z6xpjuk{i0>rLwg>jbOXY5<~a4_H#? zpcvQ%=%4}STXm6jHKx<>?s#MzZj>NLHQiuFwqc~c8#M11=`SKnbweMfAEz5s8>)4$ zajy2P3a{*6F}?gknY!$K>C#eF>7^3Cl10UHpwIibU}}D3UPSJ^99{O8?5u2$oaH$` zb40n-Ig4{1W`E7f%T#4v$%xN5lUb9sH)m4b{DQ|t>XHRz7c2HuMbv!O{;L~ka59~! z07sQ-`EEHa8!L;J4uyvFM&vXv zio+4Dbr78w8W9oRfQQ&Ro|(G^c-h}TMXrWVTwAaPctCLEv8%!JnT~t&I6`h$;0L}! zM@$YMv#F?FJqFV#gU3T|yB~DmTJVFBk#gpndF5C^>Yx}|!XuHzT#35IU8qAQqdh{# za}+Y4BarLti^uw6zj+qgg2j9;_99cT=8Q$$cnyy@SPezoJgzg`FG*BZDDF^X$~W+4 zpiSnv^0ss9QDHgBnU5^^UG{8tFjz?fWUw}X33C`4Hyfc&;08sAUf}Bn;FbwYU4j8x ziB^Gi))uwco?s<=(vQGNI}Lp=K6W2+dvE(;Fx5h#F7^@%Qs1Z}V1}5$Ve_@Wu^on! zWQnZ_{0{HgD^Zs(u%ES$wKum*fG2#9F?iGV0a|c2xJ|mj4Y3+(Vkf|xX$uGK*VaSU zwQ#+gX`N`DWSwhWW?cjayw2dSXskS|2@Ds5MP`k_h}~g*ZmqPcz_W3;34p6JA`_8m zjYgg=-AaL@7;Fm%@2)5O(gJKUo81ZpYwH(yrX^d`aO(%W4PQYQ^&s5SrlHo?4H?@o zaAbn5-c|+t)6Dp!Jow0@T2d@Ofc<#`F2_q;e#9=T$U<4v*5=k;_=YR2+rh9oWrY?k zwDx{l>%i?9Y+DO=$830|HH8-1So;k768i@Ge*0B>G}wLt@C;o|odKgdlQL3btk=E4 ztUdsiO)OLw%rp<2bcbe=8#I%Gp|j8jJkbSUupa`?{Q=ZO;-N`U1?30^njtP=X@x_d zVw$Qxj4^4o7$mOV--vW2+H^~79uPbW;cwk$>B;Cc@4u;(p zT%Uq=St>Nm*w}+M0xzuxyAQZ^;c%ppqf$}?zTIz@9;zD(C=mOA@h5{SS{cwd3|Mk}_{&X&&cq_Wye z#i|D1V;e@nU7|1a$v7-umJFx`J9vIiSp-|aE&}7~IvALBthvA%&VvHtYw#r9xRJZf zSYE7H@JMfh6}1>gyBX>wu{dU-I6havuG<5Rm2@119K_Be$OI^0WTA&V(5q*V*XoI} zyNjrTm)=Kk+KZrCUoSaNgU1 z9Njahug!t(#A&GcctE?U4dD(|j_=^$vcN}*q9=h(eFh3Af%GpZKAff2P+h=H@~37| z52ys_<6WXAf~f@WOxsmBk*&9=EhY6V_{T@XXLF);JbXTTpdPTtoCux$Rq+1JG1eMI zh!KN{ZZZ|L6D2G{0k-)moLGl2t8)GpBm)~u^4s@Pk;wXD1}uXKEwviwzf|BCdA zVU@F>;rs>2lyRjCO0E~Pi|_pY@Y}n1YVq*m+TRO)|0)s{xfD(;m|8Fbra)~=1m&ly z?$%x~mYerkZT2BdGqNw(d9yidc>f93iAu%GrS0Uq6vc3KAFKGJ*zMHOIn6mn8SHXU zy;4)D8S7f-y4OwXHr!nXW$kWW!QSQGr+oyzF+P{POFS*^R@ay65Y;881@hC9)ay9E zkEmrnR)__d?N8c1*sCcSGaCEH)9h~COx{*OckrXyODD({$`_-W{lUq@d4=;gL|fT# z^s+cNSGH4zDu*lYD21wS$Rm^>LU2(}S3gwu)wFOGyX|-T3Hz1XE=E(g5`YKdWjLZl3B>@bqA)jDXNbvq5j;MZv+avj`xVyig$w>%!Nw`+m}6n zwT!&hfG*iUKZCx&bi2)V5)P^oFyyDgCE9Kq32n^57!&WIz*r3(%rYn}N~yDUKl?P> zKh}YkQT5Z!-Rp{shYfph><`uU)GBH^R3E8|u1u()%Qu&6$|sf$EcGoB0E0iia9;kf z+jB2!V$b2w&HZ#4Bi+aF`GDY0TfB=q(&fUqc1pyUlE0N-K!_6N-8+QBu7>Uou8%Da^QTO3`KQgAyjHfKq8R2DCDdAfhvuwQ3?E>2BJ9+lKPvCEIU z-!H^Jj~o0&ALA8m_}KJ=`}@A{xNoPv_J4K$WzUzhUyOLpeeU!^^=jpt-S3-4UyN^- z5}P@$a8|{4{S0d;v5YfS&{xu1en-*WX_oUeRa>>E=9b3j+Qj2uuO2=^zlZ*#f;^gq zwz$7P5X_U7GQszI&}t&Hk!km0lHo<>_Vf z%a2r)S1zeuu4U^#87G*3SytH>FgsYwxGe-)QMNQ*F-AGkrA$4*b+g+D_v7x1+*?7( zzsl{pTQ@h6TMxG!x4rJ|Jk*|(yh^;g_;&O2Z>03k^mhrk>HodaBfp`(YrP{p&$*ST zTdB4=CCd1c{=#%#JjccgA*V2IbAEK5B+ozBKc_o<7|v#n$;e2X`AY^*qEkr+eq8@v z9Pj+q>r1zo_n(h_+V*kThvV-(-lx5r_wN4N^fyi4oPRBPz5dmnSKnT*d^hgXm@kJD z!hikDJysg44KtTfU)YNT-^3kd{S`f&Td1V!Dov8xeUG(Xy?vbg${OVcI0sz_p5MG( z=<~4L@W&C2+Uncd+s??49TKpG&RN zBw4QL0-w)q$&SbVWFRvcDoO*XefA@$jpoC z3N#N~%9RyPVnt_}r(~P3o>#_c%U(fV0D^xEWrb!^2wc6}WA{T+P2t%nN8~dAkz*XA z#6F8d9EFZlODG9FV+KR{U=`~-o8pvlcOk6(Y^`jW zOd%UD9Uw^(se~i>ufP?1$r?y{5*?ZCbQbiTE#N*AP=RlXeb`XS2RO!J+jr|Y%Qf={ z(@Vo4-BRuD>V!&gFBmt@OR-x2e(7 zMpOJpHI_7)*K|PhEiHD1ZEfAD&8aqL+GK*qGe4q5I5%ufD7(e?X4WQi0yhQhYIN1d z%Z0m#zg;*^JX3mHzRKylbFR|WWv%*( zrl;#=*Pd=2-JLvUdTj8>_c-FI@}fP@dT#NI^)!0^=h@#gz;n3gG|zeuSC3%#QEro6 z>(y>9EtH#`>g6qD0!fZgE;z<>=T2w81;(@hnfh4lsXswaLIPC2-H$n-z7g zrWVG2hJ8S!ebmj;8ER+2i_1mZzNSsJpz3AC*z6UU=?? 
zoO9VTvO+S)W<;l#rJepY{O82fMJZ0n6B4xvZV79?U;1`8-aTFzZ~eOZYf{|7xOZ`f z;8GB#uzGoYVy2{J_REybcQ?jgKU!^NgCx`>Eh|S%}wU<*^}ix z%X^0RR_{XZ@ji0ju6}I)PK~pJ%9@6@=o$7Y{89v`?XR})+8$_gvGtBtdsxfN*osXMB;&iV2;(hcIi!YFu3fdh0%DvgbpKT>%ZMHrPqrg&*1qV9_kZ9&!>`D+92P>%rT||1N+>q_Tc-uJydm84iSW0ET zY#m`?*T>bx!@c((W2|9}!QD^}R7s@%Es$kBYP)N@*NlKN!m`Sp75B@(l|3mPQM~R%OmcHQ;Dk5Jt$0pBbr>QoT|Sr94aKCO=JzN?MlK=|_)*XWy0J zDXsmwB~BSP>q~8ne>Cy=)2BzD=6}llG%Tj>Yh0o{tz#}(YN#D+rN}Trt+a*muDV>) z*p=&c81wZ)w7po;CQ2!zI!~`u5Xns6dNRb!6LR7F^8T_jkiCwg@V^L5t?}c$au{}B!14` z$2J!zEsJ@nsZJlNOVS3{Ov10WR7O?D)O4#|ua7lNwk)+9=xyXq&O-hc;SZ6!q(qt~ zAMG^T`M6T)vOpcEX{!-vO4ae|W9rfB0qPs->6&3!bIRS0x_9$yL#NK_$wG z9_&FtW;yb6t>N-@2YaO+$o0;#i;!dL2F1_C%yuH4^k;wIl=5~6drLmcDipVzla-BC z$CZ|ByH>kP8=&n2o|Cq6S4Cx6 ztI|Ei;@<}fIR)$UymK?M4`q$coRg8BJ~zES?QYt~G+SD3+UT^zUu%BV|4d2^PkE7a zCGp&kK?xP#Oz}tKhsQsF|K6=Hw_}&YG>TsO*+1GX)*M&y{Y(l!iz=K{5v;SBU(ka% zBEfM{FX=pmhjOOsrD~AN9`!-ZD$NHRfwr#W+*f(N_fGb^-}q>7a`S-O4f+>36gn7xxY9qIb-3aZ6*_; zpScn_0zGuLnh;5Jt-aKmU4O35&-hIjQo9U&a8TQ&c%u^BPTCtQyt6!B9;kSxNOE#h+LTLFLKmgFx5n4?gll6rM6K>^ zJhI&TyT=2MpQi~{FIEYZ@0|uH?tixueouZiYS=2^j*bDdmJbK=zu8Gp z0@z7Dg9cJ8vMs+5S*KC)aD!h0bn`z{2{i}ElIGAjUQH&jV%gU@DcoRwAdol-zyUu6 z*6yZoJa7*};VEEP76N;}9QkPMo{&;+*mrd zL{t2}Xj|d!f}nzv`LFU`=5lh+=V)@gvn5$mGimTNJ>i5&cxq8R{S`d zFyec}x8w0`Fh*owZ^!M4>x?;NMC^f>DKQB#;a^6?&rSUP>p;%M-_5Fy8ZKCFQQL?i z>;PVCL70#ho)Z^IugFI#aurLR>s4JerLMI5Qt#`H`~wd)F*Gd?IomQdY!}>T4dD8J z5B(Vu+5B>|&}IjN69V}GU;KLc-0?W7`CGNkxuerHxgBny>5_Aj^Wp#zOSm2Ap$NVm z4r>z5J=Pw=g^sn|w5&FdHvMPF)UVQa&$40qO__Dg>K|Bk*alJ?>2}B!s-bz+ z6N+ckIDNPgz`H*~1)(|L3*6G?qT%8=$!OUz`DBGzam8tw^FXDqO63xP(PGiGa}9RQ z)%4IrsmHpkRlQT{oJG!IPTLf<@?r94a6M`->xp$iBhyLafsgqF*XVKwT19Y#--^fP z@;M4{vc@9Es6&n}9-6kd;INSkjj$r9T3#jnShJw!wgWlY*-#?8N3Mcun~j_d-?>K6 z87Kfw*oG>ImE9D3pbX9=Fq%*D)EG4nfw?q64e|}}b3OS6_}@r)E1@FWj&q!?g=+H- zD1n@Z-jWk)*w3Noy$rdu;q*)@0y`U}Wv2PLNnl*AZ=#de8f#uvuc;bQ8Cp?OcBXV$ z$(iDU-?c@r3fB}w<-6uB%sHGT%(SFerLFlzrPii|rvxS6OpN;RCSgN@?z`aogl}EH zwf}ZI{@mC1ans{Oan{)BF$1H=M30FniXHs5Yr^%E3mGHw?*D#K-l%4Wu9-2R?w6&~ zzMc6>w&!H>@&zA-L894Woiq(QOUfnAZK3zVM&ANL1G9oeO@D%&?hztyu{ZQ`%M&gC z3w_rjF(j(FXVY^*s~ey4|JTp#{n%6N5$CqaHALgBp5yXORjT|7w=temZ$*SWP8uQE zD%vPuxXswp2rZRuYis2=Je|$+;oCJAzHK|q@8Bvu*K*c+&vpbWRew4O8R%nBos9#! 
zu7K$d?d`qf3}j?I;1$@3vw{1Om(BMUeg~i4A38x!a7gRr%u|k0?os9|TdIOpG0I8M z3F@nCuFQ9S=={UkNg1N_RR$~PD`S<_%6R2krML2;bE%V7kuR^21?ej*;WaZGjgmCp;pC$PUn4i5QHKZyd3%TiEtiRfDHZ~ zsLkGi7l1!K*mJ>n50z4TJO|%)Y0zA=;9fgqjaEJ8N8OmQ`P` z`nNK={J+wD#gmFW3u1B)Wq-^Zk)cd4`DOY!^C$b~qSV(ZJyK*T*OR}36F)amo@h!i zf2;m_Ebh~neDV$$rj$mwngxM*1G~iT3X9t?+*074NwcF0ET!m#UMM?#MyVk`dy| zLX9AkmjYJHA@(xXWw;eQfySL1l-)l8ciR~H#+A@Ec7w+7S*V8BKyCg!deDvejp~v+ za@P!Vj+jagW{qI4;KXt}@LvdAu@*7nd6Mnmba+bg#0g@vc#I@pvQZiaFST3pX$lvo zXHG+$vz&V<=O{-i1C({nG0t0@J2(rS>zoRm?mCSH7VoN}uOdf2PtKNKkaYm&YKGJ- z*#=DJJMmI+Q}I_(U(r1wOW0P>liv=Qg?~7=*!4K>SK+FV3x(@Z$nrb>afwQ3`1&&s z=)W4&ZZ*_XxD<5&`{pz zbWP$XXD277{PXj4nkMsbcE7xdg;R^4m-Z+>S24V5IHE+QKG^uAu9GF$w$%QSTFcBL zdxDwQR4`LKK(GAvfAR!}FB)HDt`Y`Cjz3`SSg8eT#hu_c9+qr&CVYc{pBt)Mna3j z5cda(_6dIq0>OUl!2g6=^ab#uzQPsjD%^}Vpk`Br^KKwiCaW;Ve?g7u6I2uDLzQ_d zD}^k$fD#T z#XO)g-zlmUwTiO}wc;*n>bo#5+RK~BN6L@MAIQ(+@;`Z*yh{F1K1yzr9fV&dOUjir z6)zRN71jz`3hwZS@YBKQ+R1IiE#mCuxS&d~81={vs9K+gp0t8#gdFQg`+MNs23SX0 zPQt~^1eL`5W&!kCv*AqkU;UT*RIqQP^*1ng_Ap}1jX6?jU&)O51JG^QJS1+iV3VoZ>vI^+BeJ=4YnOywx_k`bVe)E67 zE?Qf(yl8*XvLahy`@-o3`|{7{9R>R3RQ8uFan{^SPG(t#d*<=XQCY*X_vXm*KIgA1 z^!R8AZuyHM{3r}=Z{3zq+EFQ`LIUow@|99)XU zyfge*!7pKq=$ZJfuOQ5<+eN!_4=#fHKqbtwN z$&GRSfv7RcRi_!CIi_||$GL2EY2{L<(m@Qv*~N~EN3v?a%B1vC_IBRq^h;rvd&^fM zyL3)k4>YwA3J3de#^wXRcvmzSSh*9zoxn<41jm4d9{~M|gBVLqft~XQ=KU4gJ9t=5 z1k&IESoR&j)cVRFhWOAZXpYyF3*AJXK<$Tu!4Zz@K+$B;E0GFbnn~i3k{n5I=?>{D zsZrVlkx6422daZ6>2K)+@U@#u?UL7$vA}d+09U`8I9b#WJi>#*K;b%Q-1OmB07?Cp zE8s5T(dSI`C>!EB_HsS(JXqxufzB6sA>%b*^7!#oQ5njP!*flspll{BGY zp}tsd!!~t@f(nX+3Kf>0A>a419tw6-~zZB$HDBHBNzdsbT@&gAe+An zPR4P(>0ol*01u}S@y8(K+n1s0{TK@189+Z=0Uj~~{s2#qaeD~F#ZP!&U$XVES%B7% z0wFyT+GHo8p1R(w#y+H1U7~3w5Yo4d?cs>|5BQq%AgnMEqhn$1SFNA+a!tRQ{OTdq zZ>#>U3dE>5TR9Z?cvYpf;$6l33ipc7<&(;5%hs2 z%FdO=l;xJym8r|ymk%%R3r25ASy7p$d?>iTZOc29pDXWP@w1{$<@w67Rb#8WfCH-3 zuF-nceyokqZ335Tncmy**bojD*aB1gIu3SEyXvE$1e;=AU{l)D?2o9es7C3SJMaW( ziJEo*D)-G%>)#9{&^+LaHUcrU9GrxQ!p)*y;#~1qNsJ@_>+>mTrZfb{?ttvL>?u}y zr94}nS)43h z77Z=RYqI@7-_MsVlueOE;#^Lb-T?wX9iuZ}d;{3)&e$c|MLsx}yNFw2bqW_di<3l` zMCah+It=s79AN4j13#Gt9>ykUZU_W7!3!A=6jUho44>h3yaBG6df*jqa2IiTIP+7X z2hC;w0=8rbT*pt4^T6h1liT3d^8oHU-I*ffN?y{nh>)FVPvikLh$Egs9~VjqHZvS5 zMb>!BXG<)6XwO*=fl<@Wl2*U1eq4R0dLJeE)iO;PwO)4TA0_EFPWd23&F}9UOx<BEHQXO9?d7Bcb;D z#41FTbJ?b|ML=(UrhO+IQrkgO>K45ARB(K}1m8b9l)~=7p~vC65Cz^yTmyn)I=K&? 
zbxEWITHfb?xT#_B*=nF4!r=As7>bPE96wHL&QfSU`f~?yCvxX*+pR9!M#|rSQt9jjkLi?9LAID`rn7WtwKcG@&hO2Q7;(>g=5bTMy;DW9ftOB-v zJ2*0}umf%*_#0<$6doIb+|qpfACB8y1t9_#AlMW*qg}wU$%CW(H~t6k_>c1UqHW_Z zLY&bF$3BSf2Y#;{c-I^#0ldLp_$v5?|86i8?Mego9=@52Rh4xCRy;7d{fY{?E_i8_+L7(e;o zzaE2o*mPi50^pfc4Ckijz&{$w2dW&#>~Ln!!xyg|nPBozKv(9_|oGaSm$F0>YP z%V=QY?ot<_{rq6)tF24XRsid^9GJSlk^3HC?TgkQ*FB-X?+vX3HC$rD zt*wB7^R@a|1908k8iE##+y1x=2j{&jaB@BIn$f__Ew-+)Zp1mX$-33L4+yyH7^Uy6 zaZr}d01w4xmD64aKjRhA4iaZ6$mbcG(U90eHps z01l%s5W^>fcM1p!{&V0e8fhP39}b_=srF^|?a=Sjc6jCl!3-s-LU`4i| zhH?gK-JelkDWuF`t9rtBGJp;NgKi4F5ty#?@D#caZRmc}kGHu~0 zGYG52KX7GO1SHR9_(oiUqtkQf>mzFi+?au}z&%6*MyJDTZUC@LQ=zcF2pq>l@Yr|^ zefQmwP6j{}1qgW8sl84DZ$zYVTqAj38jB z)KJ6_f!EA}DuNMSr4?|{D#CZmhCfIKwAoYfO;gC94eb}1g-7z>ubM{|H+=6}vKCw} z3rV5T_zeR50y%zz8rUpjJ-$&X9Nu#9 zt&`xm8Uy|N*EqlLAb)ic?gyu!L%$tIVmUBfbD^6*5yxi)ykmR8m!=(5Ds6T%&k2POy`j!HDth+xM66q^mVFhwALRgY%FJ=TU>7Sv`exNXCK zhi9Y?O%Ft0HT(vP2?!A5vKU#lTp}F|P)AFFCVv9jcg)-Blm27D(kA%k|BI0|(Khxg=0U=~*ZH#ZmKbR6841_C7- z37&c&R4z1N6!PI4XU4cL0h%-!3izLZl6(LUtaHFh9)K&?I-CVdFyGC9%VQMIj1lnc z><7oD9+>|+GoA3i3oav}iO>$(Ie%IQI5Ks_yzYjFL2EeO)P(vGxti*75 zdO5r#M>Vw3z|>5@XG}tlVj^&q)8S;}Xmgl(`1E-Vmy7UCmcXlIF|d|P(HstwYnZhS zZ9Qh?Ex=dofDfA^KeP|u{t(XI!?--qaQg_5pGWZ5L9`?Ib;t4dQCyyc7ua#!<9Pfq zUbh3k+VLK{alN&nZNWP`Zf(YAtVi33nBvc8?QD4F{)YEE(crjv23eNN=)YTVb-Rb2 ze2m)u3!ru1qi18_O!Nh~-5Z^wlLi=5PUh z)bPHJcly)r;$0lCceDp+4^b0;j=p^NzrKvYcZ`M$l;e9wqi4UMhokX74t@L`y_|#* z?Py8(O+Vn!?tyjN1Qs-0?^%9{bVYJNg~ZPQZJ7!#jP$^>|MaD!ryTtghf(txBgSzwqH+BWW9BU`|BRk@7)gKHpK;}Qp5xd# z?)%(uUc@)_aN_^z*I)P~M{hWq<1G2zFm``8oGq2;hiYUt9p{Q;-2ds3Kj(zwys+UH zP{?yOOw5kSxuF3RjbDf|H-4q#_W$2l|9ghxaYrw(a0$sO^zEN#{plCtf3N%VdQL;n z3()V5o|mHMW$1s$UsCj{|Mj@zmgCr{ap^b)jw9uS*ZetlivPXN z@s59-fE}Oe_|}diYt`i~)1QD6rvp)nN{BIK?`AR~=|b zqyl|bkJlL+W&}gSqjh)(2Jh&YJ8WOE~ZlL0GLi!G)s@)~v>OY#>@&yw33sp?GIMJR_WFg~z<` zy8c+>I^jDw;oTjMnxVMe6xSoM>bc`{BjK|(9IkF%@P5tEI^+36a4kXaw}mHGE1VPl zxE=w=I(K|R58Uf;Mh(Pk+Mq9J?5iA3y>2)TV!ZnZ><*^E1+_Ikr5EDBjrhA4RHXXi z*#Wq<10GC6aQvGfK70@5k2;*ggHcaA0!Bc4_@KrSXYf4-W5s?BhJRE1`U6KZLXTBUZP)IKPy{ z53I2palWZhG474{cm>9R5ZM$*Je-30@jhnt6Af$Uanu;j;{HOo+J)jsXJKY^cxm3m zteuImW5xh1MgK;<-aY+`u7~8?s`#Sup;6R9PAx6xy1pczG;7Z*E zk-!U#Yc4!bvoTwoMqIuS4yw)3C-YG2aHuh@#XB1jv2;O%@r~(<@o)|?${{8Y5f6=D zGZSl#BWABeZ1Wy&S<~R{SA`Y)JVtjUk&VjIZD6f#z@4)cUc%e4M!iCx^+5cz9P`0C z^xJ#pZ^T}Yal~HZQ49VWLZqRWGMHJ!L5!0BuqtVA%ss&yErQ?eK#U`O!&+1Wx7#@w zW6Ah#6B$1o@4dtoI8;7jKEMHS1~CdsLC=^Qh!K~ASt0=|3XhCKGEjj|RNmsa< zzd$T?p4kEp!T@3w9A|^D%a~2<20Lglj+Zx%v>CJGFPxWc&}(N9dtC(cqz0a_J>iBL z$6P}7R*xO)4t$plcrKgSO}nD+r;(e;6U1QP24BlGnKBVRx+>2 zgRE9$3+62SnW+OO=>mLM?aUYWBt|mF=rCpzaTvO5MZ|DM1FZiWrZM8|C(JTn{|!tq z*$H>=xNXa!+?<4ScMc-X5*(cZtU%r16hDVp z0hdJu*4*JZ%kN-T{X&G1n~4zSFZwynBf4Y%ASXSEk#N6dBMSYC2n9E12fmjX{WTcp z@HE8R*RUgmdonaTdSIMf03S(7c4ZXEL?simStaCU<_YzkI>!tJg5)p4gLzBOVSW%i zRvMYkte`|x7bbw^%HBc>nWJzJZI3>=4vhH@;yTsAewlhh{J<{C!fb-KsT2r98j!Zdm1y1F~7-zL~7jVg5VBB}WzH=G*gK?!( zsCA5q91NW+iiwA}XEL2in6bxegN*uU%o($A#tOjrk&_Xa+ppn_=n5yKcQoBbsCkEl{Yld0w0_Mbgj2S-8 z!pp=L(uw6v-iLShQS|6N>}k&6j2q3|ruQ;USPQ|VfOk9iAP0z>ta5TYGl51N56_B7 zk^l;NH=%&boeK_HP{i-X`joM~+W%6169N>JZ zD|Q!V4OvK>fld#LJj5l);dorae*JHZY0AHk=3RnIZ6K-HCbrZ>B%Jjmaj*u|kNu)C?++c}uX# zJYod=A+wnz`vlv2Y9=d$dzL+!2?Z}^E?o%@j5E0f{O|irHFlcwnOx*UGU-t49&ch+ zh=&97M>wN4!48cCbL}kZ2|bD9sFf^Z{zE3LFTK{bks`rgu4ZUj z8*)0aj83Hv(V0-`T|s{(cETN$Pwu0F?IAeA+00VRJUfY0Iskkp1N|>{yFut&4L2yEKV>0Mv%tJy-?q~K>i>MV$BcSxfbUP}JzDb-U z0>SZUN%SXq#Cs@OQN($+Cu}@RUnO)p7&1hZ%!Rgc*L`cbL-{KaGgJtR<`~ zj2S`eZFpO)v>&rdm@QljuM5kC`eKhDI^V#1A&b9zeXe_o5Ps<17&oX&-0*hiuF9<2I*L&ByBd5?+Fl!nlM3*3}*_c@$^`%sal30Q&=ge<^&ME>Hg5P-NmHPOXyjQ 
z471ic@;>rzCSp09Bae}R;Ob}4TG~Je*yC6g^bEUzcEj8wAk(SWw2md>z9YNY&smSs zPub5o$4CNtGa*poDrX+qr&Aw@2v!jz!R+#x9Kkt^*Z-u%#2IoEGtRz(+QyC&RBt))svC4!a1ke6tl>-+Oosmh@HWY=K9epsIWaH+~I52ocM;3C}j;H zVr}l$b@X{|vcQKcrcYb0S$!Ec@LA5RwU`s4sjKu2titQbSFBm={w#mylS4a*y25J3 z4Q7YX|5+<-w~0@j8qQ*}H?_jnnd(PQg5&Nl<}$p=9+2J~70Z(rQyhY1%UGu!E{Q}b zD~iao`P!4ot6YLL(00r^mt4>Phg(BMS}Q08yFE*0heNBKV!H~`x#P)5jMOZ8H?@`e z!u$h{@n7U!WMQM=VxL2QWOB(p@WWJ)zHq2_MYVqv{HV_}Ygn0_L=HvTskOFa_6lMU zw}x9rje>v%505&SM>knN}CA$5kemD8MUBzzeUs+BFl7D4Re_TkUw ztRrapAR^x?dJIj|GDPqF5Q)!5BE}Ab>&@Qv8mX4cKu@OM|u!DlY5Rcn~1hw zv+rYOuq;5`SisRzqq6vu`WIN6ONf!otSG3@4r2ACW?DV$dZHMrR%K)tdXD|4{RR!k zOil-AR=?-IB7*InHapdfHJ|g8y@oU*!b&DSllNH<*n>I$v9)Ah>?6LCEVesqEThKE zFDJgT4q&Q)dHr#9;(7zc^uwutCx8Z8@fbxV+?qT{i!$LGSm!$An4)@w4 zw2deucM)8Ap1sPRL0=;lLudJKxS3wS+>6hgsX$9_ku7&&#{o<{9PFH7KMI}jQt= zDu}cqFTIBe0OQo1=|#>U4>7+eHb!C~R^0vYNS?=x$3Ec_qrz^e3-)?>*#C&Ib6JDA z{s|nVvJjhS$@7dqT}YP`cEs^ZY1)3D`oR2!{PG0iIQ9=-7-KyVQSV2zxq%RpFNmAW z8&oAk826izwY!Ynz%lHiJP=`d!EG`Ddx8k;Juky)U_14IIS+(fHTGj|F*^Pue=*(Y zWV#H|NewZQ-UyGOG4yOAnAHy6vU;)w;RkQid}KC9VTLZFdN56}KMKUEY#`$h*>nKD zeKs=^ahrh%Vp35#nMR*xmcR%88uk%CpsgVye}Y|RL9ai<*o5jn5ls(;r+hwridh7m zfi=WF<~8bGOX!dEdgdcR0?pNeJb@K)phT~=S=s|F$|ryh=76JVt4Cf zcRzNw*sa)sV%K9UHW*-lfl7Bxcb)m}gTME?{@1w#gkhLDYwxx9Uh59V4sxb*o1?8G ziJFApiNVezmT{L{L{wnBHgrxSXTT|K5~Bq%ol12yqr9>D%z^@J81n8hN4evqvlW=_ zb?6LSL`B1!_)O%HPZ=_3dsZTj55c7AK)rL$g3qllV-lH++Q=WAIReme5HMDedvO0W zm=zze3M;_(JO-!xS!4p--A6D&$!e^f@zi^V7ZvF21)YY?q?BiEn`V{ktySO_+&K}f7$0%w#v5hg6naNl{24bX}sHr%) z{y}|VHzJTmeFA$s02;=?RWx4s7Vzco37n28zu?~!w?~HTMcbG?(Ia}a#{)(){ zPA;5PLGE*gQ!gETD5Y~cqa744ONk!TNJ>qtfCpI(S%iJ?P;k{U9b)H5G7_<0$vA}h zywu49+q2HO7%1X~WNYeBblawf3Ei_e2($p#vQ}s+?Ml>g#Wu{SQ*X=-};x z_g6oh9OgK?POkgB2cz4smCyj*3T@q_y)S)M2;6ov){JF*-UwoewbH#$nIcsvZ>{iaNtmbS)oI zC$Vzn;mpvBM(!a$OOT=0;KbjbTtxQAIU*04WDsgHz3@JxG0rBE`OpYiLa^YV5rn=P z8|Se$Ytz1GC;1X&Y!K9L?C3vI-C;p45RF$A_wE>j@j2hGgSKucj;R3xJRg1P5 zrH4>$yXCxqYGWMg0?ByndDO~|qTbXMJ)abuz?sOgbFfF4fms`lSu+uu_NmlL>;yhR zS0NcpDZb+wlmRZ<=R=QXHM+{3p!Y&S8$bdd+=rGEmI=^Cb;AF>(0s-`#vEvVZgMmA zgZ8$MaiMXN@vVV09MNCYSL#L3&wa1osBfTur(3U^uA2*8|NXjKy3e|NU8PQ>AEH01 zzo|d1Kd4WE;!~PFN8jGyZIqcRP2M>pC%-lNo`w`aCziq~TA zB5#AY+$Y&*ly9i)mu$A*BfrOfH~lL8`pP%UC&?q_4dt`o;ITmNEzk11<9EX^*>8%U z*l)Wm$CvFp+^5)kGmu>?y(&E?d4Ba^dQff-*T$~vU9zQYX|&`Y@oLdzVYtA?lfeIT zB4-SH6l(+AFqjM%l25o`ru0CiA;i|qy3~>d|0KPUVeDl{(UFCngrGH8T%hI4jaknJBR1M);gHd0 zj5M`2Ei!qS8(5B8C)(Fi#Y8mI9g4Ht!M$!KIE^CvEJ>ZTgKJ~A61P0}VV(l-B%d3; z+htyUVL;W+ky&NWj^6x=#ZnfmC!?aWbJ4DWBJG8fLh&E;9(a*A2`Rn5Qwof zYX{pM@YS0`D^dYQB+E9~ngX4Av)R|O!_paQ@DHF`RE)F#SnC4Be!F>{d8xU#S#Men zcQhAcTf-uKI@!PqLq!@{;k!(Nc{!fJ$+rRuAL8{14YIc3-QRB7LXOHhl*-yl`yWa^v zANfvssl1DSm49J?OVHwAx6of<>mtra%A)vDXCn;}>upbf89I;V@hVHS#orQS@wRBqkIW0qJo6mWWn-OThM}b)&Jbg$ zgU)xfPN1dKp6VD?U*$5zkGkn~;=0qd+}ebi*42*6y%mnK38mkQR~0QN{95p`;Ge=9 zMWaeeOTU-#%a4`qDt%Ghq_BUU+wX*5>A%Q71M(i^Z!X+dEGnH`N|q{1PM5AM`v@;1 zbD6T7U3swbN~N+gyDFq+R_(#M8pU5DofWzxv@$d zeVN_Me%QvfO|)LMke1zWofv1HZ{7&cy9MS3=8L8pBj4y{2-UaKEreH16U};cx++rj zUb$5%P@{+C%MyEG zO8%NZ1-}~oI{2&T_x`*N1rG~VMW2g1lyohTmAIFTDEU@0qBOI#TUkZfQl+X#YT5eHhG63fV}IaSJ6IF!Q>pXBea0ZnT3RnL5z4xHWanC;yJVuv zS+`7&46mU+?R@w9-tf)!^^?t(70GVY69#2KHWuZl}EgLkXr}WlP+ys#4cuOmUM#jfMkIr zVQT=YSch-+>zoSxR}z;$-D^tSW? 
z!ZX1#(6SV$(`}Z~n2)9AC+7dmzswPqjh3$#6I@2$TP^^Vng+eQ5MV$XTh(yvkXr5_ zqO;81;V1GLY=;Y`eWt~xiC~B|FdZ;nG+fd@*Zt9|;Y8#DN06nO7|kAao+?%~S=maF zTYJ9dNwuoVy(+QtMMaN_mE}FkB1<2Y1eY?(Y-PIg>WV#ZvG=R+Dyu3s6;3Ef%opZA z$#)ivF1m)fJE^Q+`HAu)<=4v<<*pUR@}K3*inWOCh{|_xD*98MfZ0nbvN*Pacp zk^azuk2M#WZ(Dv_2iWTzXPxuOIOYS^JN5=nB)5e7idVp&Bpf21A?@M%!7b2Zn&&pJ zzTS7dwch=FzWGe_t?^weOYrlRo8({o?*v>9+#l2<_;qk#$c&I}A>%@fV8lcQe-4@$ zBUOuh8*Lh`ndV8+( z5P7V1zwP$awGxVthorA1TJc1&K)g+KP&iW1AKbQ%(0R<~1apqDW$f9kGfY1757hDW zj0bT3xQqIS3r=b>oT@pFFTioW2Lk&CJThL{F9Vs&w4VV&Gt%}I`Pc#lL7Rndt%Q5{ zb4wLqJ4#ELMFY2jN6;5bV2WKWwsO;3$ijo*wy z=(H-0Nk(^LilL3+iQZqoOjo6ys?7&;MX!FLKB69{4pNt^)~N0(Pb*UEm~|^^@7FvB z9(_$!WYvz!I~A;oXXVM|1In9}|0Yj?(_t^)?JPwl{S% z_s72Cy>*dognf&H>3mEqW|T5NvxdXHD+&9o^SpGvLoir$TTDsXxh!(s0sW{b=rAqt ztn=*db-~Nu`-Hcv&wigCz9F(I*(yJ={FD5ef3JY?0bT*${6G3<`JeFb<}dMg^$+$J z`u~=v$v?_#pEA9%S0%r&cW_?l_X0n5sww!hfCR9s3MHv|A1r6HYjnP;7ozycP&uF^AY#mtU_iS z^D-kGCsY=>ANT@KoW`~T(G~*Cs2CNYRZuCr|1Gk@*EQc-tW-CmR81+-3*uPZktIgIN){M}&Y96Tvs|~7qsza*T@NHP5R45KA<|{hYYxCZy?N@84 z*;x}?<65JJE?9om%Bt&CKGnX}d#l7%p;Zs6qN`h0bF2NU7gQguo>ASodPVi`>gwuq z)my78s+ZM#tWnpDuN_jiLNP*FrChHXs1{>M z5@4HRU+Y*-^#!VI6j4E>ky(r%%vY>C?Eg4t!2hY@-Qm9%)Cs>r#l2FzLJ}tZDqZ3d z?3&{Wj|C&xrL0%X z8_ZYCn^3_#g%fWo`ny8%H}Mna*)*J}-vGfVMHlrC6u?e`qnM9Q4+Si;FSHkAj!bBM zitOLObA1BTb`vQ4#aa1QDOgDE_$Y^l>2nLm(%xKV3O8RfSDE*l3rrnwzN#=57(W|x zj2ffG_@A+%@wVZZ;irLRd=3SUp88TsV}O}sGF!as3xmAs1ItU!3##D`JrmA8mnrq zZmh}Cs5JL9TQ%o2dQETbDXlBcy*Vg-){F~Ve_RXA33Pox#eptgHm93t5wNt67MI3)4#qkAGPl^UfssX?lfGF`%4 zy1ER;)!QY;MdD(Ra$Q=uv~sDE{sZ=4t;A2-4h*)pk_i%bNwN5aI90q)JXhRAjIjsh zim$>a!bk9R9WSgA{40opSFaIXAUF84F#`Sr7ojyT3?5|XxN%^VeTR3;EqJGsoXhNy zY!Ul2JW4Ksf7pkmW*&iWToCjx3!tmH5_+Wn0Y5w%d=qc-B{3gW4g$)MQ_;g{;fz6k zc!)}u)A7);1bQyv(B3Jt@4~6eXe+d3;FLBNif&cF=1;VCu(q|fwDPPEEmJHpQ0kUh z^yWL}dC=_dYaVJ&Fq=%N$Q1ib*Godfr} zM$i`6d0=6&Q9p5GMweeCTGzdoC(+4MiiSWab#dt9l+=r;b`xOa7dsX zSqi1dC*X{1N4;y5y(d@^4Zy1B+Rb2#6xzPo-rLfkD0vzEjAWenw%OL${9r8r4!MJ?bM=5z|&;GWn%+J3_6unz1CJ{VJ> zIAip(PpH>0-fcf)ziKclLbXRkYh13N1%2- z6#pNL?+(Dv`-108Uy1Nb=!)O$NOk}_BLUCS8qd`N&)5X2uaW2{hofd5gumri|IZ7! 
z7a?k$68z&~FTkv409b)GG1epf4d|)RmCp*G`HO*S$s>Lc*|>7h&&os{^bN2skD*X} zr~abR`j=4IJxLtJJKcl#y9HI!wO|{q0QPML?!w4=WMU%l8tvT?CE+TVb^N(a97K5%xaK)@XYZhZ@IJ-{^~2hoWA!N8q$14_R&uvg8Ujev{_!oI~5 zYZs{g=;RB4KV#Hi1bQ(JiiDP+z{$rYMvqH|K5PW=c`cF8+M*)c-#G#(wwZYT_2^V> zK|TBe(BLo7jm$xhsRsRe17hihnr;)EaNDCRGXe_5bAe;r3QW`yyxSD;6dt3m^#whO zQlc6i4Vovz#l7{!T@J;arz6@15$Oo!>h568^hfl@kQ31N7=sSlXndYTPR73p_mTidiF$DbmL+X!N$-6r>`hzBlts2*&lhs&*=r7a1e13GdgwxL{*xJ~Hu<))klF(Qc^f`_?~45V&lS;6F7-oZI0qcE?{HfWJQqd3zyV z+X`GX7mCK|p9R0+Ht=*b20ja@!x~g0Sy=Tb=#cxNw@53;%fOE6i(Ee%_}C#pTh7E7 zSO6Z%2He{upkDU_U2_VfKNbCxzuc8um>G}JfqH^yq+zDK0Lz8uzPziSIj`|`dOZ*3 z6*9tIT=&3wxrLs_RpL5gcM;Ea6rGNPKx6L2SX&3A-(vj5dG#?IkJm92nWYE1uAPBB zZGl%A31?ZRXB)<149;z)B{jsmfJ9?ws|Qu@{0!uz_1cbJCx{0d0^FBr=-FQEVkV488D z#@u$G8$+`UM402gz?w#2KC}QPyB&~#opE=2VXh1X>vTM3&J4tSA-akyfzeq{Z2G^* z@5I`75V*dxxX$3;1;p+Wu2dk?QtQ9EioVhf{O>%*#|7kz^YwhDvX>Ch|W?xURNXJAsKR}8&(%CMyL(*RD<_Yiud#z87uSu zST1SxQT-2*y@`8q0(XYy1RceF+J$?#1v7O^{YRSr^S^5YaK-D9Y5%g0Hh~qi2LIcP z$L~T+{>M5xi07e^(I+tT&m+?G_@J*F==(hc%9tKck8r;;fI@kPk2E`oW}AG%-Os7# zmVC!Zq&X}<&|fV=exVs9d3gOabLS8KUy9LMjtEuMb3kc(;F<5fG(8 zpV5iv{bfC{ffuJ43L<1NnjPYUXa*p!H9&TwuO{di#vrcku}kTOJK7Uiivd8+4+ai& zG@|<_2C$zeEk0+y#q6OQ+=dYU^dSK1^~!kWaJSTV*?TCZisXnpm&?# zb<=q#02txFYy?JqUe@AXl;9rG*Du`DSGeO(k&|EG^BrJ`>A6W~W*WtQ6f#3vvIBc}UMfngR4T+T}QF*MP@W zgP2$1s>bZ3Ssd2-b;wZ98LL@sPi^SnI(A+ zKsrYM%b0sHOKC13{h7{%bo^;HU^3!INAL{t?FEbvI%nU&yuFH9dk3@i@&6OSH}$iX zWLXQ!nOcF!)ggW~lZ&3EwfIgszE;=sbLbU_ z=I{KSy?D2&vByg{^)nA!5yNxXLOD$!kztwJ58_5f4P2df&G7qsN6&D{tqAT zVHDgz%xDhcReYvbUYe6guc9gV>VGW7EA{Ivz1GqzGR;M#f1A$5Pcbv;4DkxPw|AJE z^z6(6+Jw%=G-Hou>*XV+f1_E4S4YQ7i9AcQ@n}At4P*5$H;cqOp=Th?{-U{IQoKK! z;U&Si4!{T(V_Y*a7n|bpN1Q@1`7Ta3irW;?Y>vMX zk1M7=vNXdh09PPhQ$YP{Of%iQanWoy`V!$DIqKIYf-ocI7QB0UH?6K8;Yz$$`u>*V zzUSd%Mg7`WfjLu-JWzsrQ;Dotg}YDTH){~1j~FKr*d1KOzJpI}!ucf>v8^VCKo4&} zqk;$l67D~0J??2WqWlPvr+2iU@!JgC+n(4ldJ_|%7~+Xh;X$a71@1ZJh=v`EhF<<4r4z*8+-X#SRK;vid4w)d%#*+hq3k@@8J#p z8l7`!PV9F4{yyxbwHP^X@q5GYFAqqVzR1x%`%)vn&DODIVWL9 zUWq;W0NjTOIJ>+>4l>|gG{-7_4fjgsR8cRT`NVK=DC$Yeyb8u>K2h(Lduv?#@pq&KN-G5-}-=w}m>$=R=2;LBD8HL0| zs-Jxrsw0$rGw`sXz>gkx=f?QcCJvMYMsllZH44yp@?cMzn$^LK1nHVPJeF9UXuWxJQP8iSv#;NhBc;9VcU1o7f9k22u(%@EKwfd69e! zbqjwml2*cXB%1jhb;fSwGO{7`KgKeKLf57p^Ant-Ylv8AJZ@qxWNBEtS(yw0Q4JS@ z1H^0QE4GBwh5d|~KuRdR&1~;YZsH8*i+HD*52%-LZTm&}Fqd%Ja@(^VWN)X|anCW7 z5)(feYuHUV4Os7qUhu|TO>K7GBf2n>;3oHkyaPPuDR9_65NjD1;6(X|;8CCKJHc3Z z3=C^HV=5z;oJN$RF1Vg*jvT#+Tupmi6FVWHkwtw4w{J6aFhYPIT!mHa0(Og?okoWr z1uie6FQXOl*%9QpN-ZY&tXOtW)S^VNnoEdiK)P`&j6Dtkapq0XexmpLo9HVKETiiI*j(V4x6JA7+Mc! zLv-+c9FrY6j^^xq;B04msMM5VwvNJiC>z+b;lOFKF;>r@(sB}Kxye|EHiAjk z7Hi~3X9}?cBQqSh;96&6Y8HAz(}@gZ`zgd2;ACycyORhhqZjgbS7MuEh@Iz1rUHoV zWD#Q~a~5M1aS8fKZz=Rxp`x+`Rles~mk&Wdq#53g1+ym$*{dPA2Z!ws>_fmnGc$C| z1I%8G+eB~Y6!3_4BBoYn4A!}P;9-5K3C`K16Hap@NL1Ib%C*9Cy@j&-Tf~7+9Cki~ z>sY??A$b`&WF#>F>Oc9|S&YD)yGDHib07fk`xZRD9uUXKk&H0LAz~%f)v?pD12cR# z_Ip#Xb8G25gxaYH{lQr9l6I5#h)k-`-X6Y)AHULuTq3oV^s@D8_8zZ@GKlYsDE zM9oI0EQd%y?bGHQPc6kr3nq%l&Wv7U30OiGfgX>*D%}&clW<}R>UbNSchOph38Ob!WYhZpcueN_4&}*=?5mxAS#q{a}Fe0qg!*DEGCu!p*jV^M9AV()hoaZba1JBi&{AWjRLvG<#U`r=#UhMwei z>=fog$>liCTl=w}55b9aKdQ{5u{TP?eV^-`Oe6x&Jk{9*K5F}&&+$8PggrpL{mUv7x!akDL zM%V!?^G1xU8nCWA085@mJx7EYxcl93&c6;_pa9|(93W!BO+1EtxE!(Xh-`5m`>JC& zW&40@!on-P4^HuYR9gDi@978QytWAM`8D_gi{L)B1ia_nIEVDc_s8H=Fa~?}c$~?L zFsIfaKC!slbPZ%AVyD1-3<2YL0CwKrv5Pv7%E4aj^>$)yHekQ&pzc#a*l$lnM&5}1 z;b5|wc#XZmV6X{7z~(uJo#a{mr&Q5*N{rG@)^ba}_fmqv? 
zVL#Uj*wq^Ba>wBOP>%b40JC))Iy_ud>qnrTZ$7*wE^>~5xfV>pa^ZJ@~KFi|Dn%CE{EHsgfWd^JI_*l z#6^w$Mj|3whj(vs=3zdm&`UUh*ZU2lWFby~%ZZlQpUTK{*i#R~SV+Vv_c`GPPVH0d zi^n48Ekd=)0G>q|(GDHG^@tG@GiW$@7bh};OoPJIZYbXOhbDjmIlL5OB@g?%b70cn zB?o||`3xA6vp5$#K&79FZp?Nt4}{1XJ;(&Smr1y<`|&JM6NM4@!8scpo(R&2s?k22sZOB3 z*#K3QR`{E(p|SOpjv~%Eb1*vx<9>BVe@cY@pu?Giv%nML1$Lz=h-CwOuQNL8k?2G% z!YRrH=d#1d6#eo2V01P*A&ZPh-Ebbx*bnO~J*hZt9Yf9TFe))dWbbb1hfN`_lC2n5 z$<4&S&JWa9%t1Gd`3B(PBszUjog7abM89h~=6he9VtG(XNU7fypGHpkfbIhqzqb{m z@)7bvAH0flV3|9S+bG1RIkK}Dyy|JFEe?XxS1a7hdAJvCa0WLb&vgXv<2sNc1n%e$ zU?WbW&ozTcM*k%Q<02eq^Lyy+$dM0eC(w(9tX#l2A8$9n|kz?E9p5MU9@&Mwa!ur{r zazNYg8+dUuDNl^AQtlp_5gM-z-C(+JYH@RTeYn z8fr2_kWt=}xtKRAF(1F0A` z7mVn)U3ARjA{`3*Eg%!h?IRqo91<|vFM`jv4(G;52N!G@J6LuCJl^bx!ydLb zT%}bGizAX64Bz8jR4$mQi*a6hLJh=qjJk($>H&=bC#Ap`UycewH0oTBFz5ZCW0Qu} zqz!6HTvSGq2r0P`cifBIfm)&~W=#)Nah?&~$xoyZ%I3GoBk-r#hVu~*XHsNok`3L% z{ZRfq4ZeK`s5HC)5C0X;s{Np&co6ES7DgUpEz}2x!PnT#cmoD)3{*k`!K+=sY{q24 z`*;v@Dzg#uBjYkWk;9-pn~Kcp0d|{?c^14&K2ARzW*(y$UbKgpTc9+03ogqe@LKl3 z?H~r&kUelpb}&}p8A}+xjP)2h8_=Knj9Hlr-qjcEpJzd}u_0qDxq!Hh)m4T)>rE)* zw8Sdd8h65q9)%}%sGktqApC4H;wN>U#$H2+6GRpC9_EAN-wvpZ6bB14ZwA$wGB}Ro zI~De`K#uf;Gf)IPsh`=Kf>m}H=bRdQ2m5(&M{e1Aql!^zKY`tA9dICfZ8vQ$_AT(N zz5@mRMEHGowub@1^c&9BCMY%Rh5lwdemmBl1WXD2+wbixaIzeBGk)R*Wlod*CI03U ze7Cn_4#vtuM<7r^1=!0igTljX@N|1R?mD`IFS;L&zlRWOC9(nC&-OvaUyhUFZ|vCu zvC`5Si|?>Ih(dPv0Jp%ks2YU>3%nxt|aV+cy z>?7!~m;fm1^%DUrOJK?;(nl+f!g5?br+7#v(Aca)$WnKm~&z8{mG?5R<^;m_w zBC9B{&pCy}QFE&u}sjhZ~@J9c|hP#Mn*aQ{ysY zf8$+alBo^wW+lM*O)+gUZ8WtveKW2!_Ao{pTNr!etLH|EX@cpJ>8t6d=_CAmKAL`* z>P$iA+2$1dg+XSPS!-$poS2*CyrnmEXeR^xxB-lX2i9QgeM_pP#xl&RwC36bIC;Ot z**g;~(|yn^;5mjOv$FBoLf&JA;>U0D_puFMvy4V}3f%0M6Yeega}70#vc_ke*ySlU?PEWNR5=hB8&@7T+l}beA#zD*_eUU-;G8wFPK-wik^T7xQiq1f0cZB}43?5YHxUsw+ zymq+rBcT?5onHZ$ff0hEf;7QvLAJmJj#O`iRYDb@P9dU>q6D~_2t-Xp8%5=!C~-IO zV7Lpc6E79F6Dvh&B6uK)GDHVO%S9(d#Uju?#0t?v(LzxORLj$Z{|WC4UkS5?>B6N# ztKg#GlVAwGvj-#Pq2Q)qufWRp=kJE%^&{SXu=dV!k8yuO1v?Nvdkdg;eTsXYQv(cP zclKGu~e1J0LYzzC@_ zZiQC)Eik)!8WIhI3=xLwdJp|=-F02I?iC`M0}qTgT0*-MJc70Asp?dy4=DSoqSbnJKTWKrNWDhQR`*j)R31?bQan_gP`*%^)S+5A z7_)u!XY}{ruu!N=gd?0mJ4olMk1|X)?lf&N4+X2GfmLal1MPiJ%UVkyGKJK>&Y`0C z*ah{2U;I0qg~pNR$Qg`j%)zWUc0BaPnY@0y(Y!;vApT>%H(X?H3X+6ZMc2fGCD$b- z5~suh%y7Eon?x@WNrR;$;i5Lu<*Q4G>lD{pu1eQB*E6p1u4OJ)T;{qoa4|~Dq(##A z(q!peX;-Nch~blB3emnRx-Ytgk)Rj3iOWRG;9qb78uhmY4%(T3=f=Coy~NdXSMzun zZST<3v-6MfmAp2*OWa1>`|!T#hxK7M{0L--b{YE(y9aw4tAx1|dxZ|nhRg}fGUhDS zW!53qK$eO*4{Va&j4bR5eMl8hgSn{E^hA9i5bL`Kc5)TYJH&h3fzDuP`{T@V5IPUF zSa;`xRbsN%SUrI0%&@WTzEBFk4nEjTYa8otFoo_~nqejAf=8XPR#+I8Kc6*5`u?!$QLW{Zw5OcpC0kw^7#sEv{8gQ@&Nat4pjsP#sgXva&^` zqT*D=m5Sk&)2oJ7e}`wu`)W@0ippW-%}Uo7Pb+#;c&N}(C@9`o;!?JwoLjlKidhp{ zTU0x=?sA=?u9;#BBs$kBv*5yXL-|0-R~=JLR-e?&)!jF^nVVa^>=llq(1t68%jsO^ zO4e@n3eH&W0yq@>5j+$w5cPy3a;&siI>E)}Qs(OAzQRM`_0oHpuSUj_zn1s(Pxim% zf7pMbzqkJ-xm3Qy&n!!pCCWTx0$H?dvFy4mTc&^ueY9U!zgYZiv+RrS2cN~>)t&=A zGTer`I;B;TP{|c>U$I(rTr^tbBRVhC2pS63@=d(?yzkruZW^aGXBN8+E0>W(jzf*l zlj@B=&oIY*c(hXvF%^WhtIBZ(9(rq`^v8~NV z&3?A|G4Z?Y+y3u^f0q4vm-nksP|B`oTh#|%DPO9XRkF&V6_FLXii=fWYIus4s)3pa z-3t8-!#tzG#Ieq^i=Aa;G3zN;F8C%ii=K(QNUlmkr8UxU*LQB$J??qs_^4#L@`(Z4 z0+$6%3jQZ#e(27yH{rpN+oL`;c-wGnqvS?w8~HT)x8c(Uyao%SG?7~(n@1`m?ncav zU`A{Tw}j0PD-RtS`Y1#aGB9{<(6B(e{{;Cq*&3e_Uhy7n+*SfC@j?7q#1@Sb-V{_| z6|dxe<=lfS{aogB#xZmx(7|-}qeeRBLv1F}+Q^bb*4#UKQuHZYv!g?z?=?3!D{FeO$>PRi8+C5z(}0W$j+`6u zApB350`Mnu@Yx`BfVY1azZt&&c*{Lsxh;3;E|G}d@sD$FvfWts!TNfE`9z_zkb*je z$T<^Tp?9_#OS$Qn;k%BhJ*j@9nxPt{TA)f*SyesM({LrJC7N>04{eT)ZCGr4W7=uX zGONv9EVnHj>p#|D+ibhZF%o^%qu@%FGEc&BL&fgJweube)`*r%uDd*S>*3MSGt-mj 
zRpB|!Gs7d%gX8hTJ=Oh`doKLSXW*Ldalqq~N1?|{k82)j9v?k+!9!5(_R_Vl%SOo> z(R#sCUIh0Vdkm{Da{=QZdIg)|{M^d%%{ItdY(8WfZ;UdS!4G{4R;asbkYaZ2+Ug^f zH_CsOhLv0>8d?}!V9d+>bLsc#UmtS)bC>_j`O*DH(f8%w3%+r_`RBCE4*dGyv*gqK zO#a8u?=QYPlTnj?G<{S0#`K-(SJN}nZ>A4T*S^hooBP%`{ayM*IOb)3I+@-4$M#>T zd9j7(ipq;F7CkANTfC@bXQ`xoeC6bt$%=H$pdNQ0HwWi^V#%WFWHIHkN9P^;%$=Dfj4dTn=&$Sxg z+O5seHrZ|Nw&~PnPwUIAPRDPIn-)8wWuKVX7WU@HoAH|_G;Z0D8&w*v3{4369CSJG zLqKrAX8*VH$9^MZr+kih_4oMf8YZ12y1@_Teq^0uY$di*FYLW+-d3K)-8{lnj-766 z<2ASh&Cx&8Woa*J_Nl+9hO5ezbCk`LW0mieZmLGAe^k|~18SLOm!`e8w{D((x}hDo zQxn#L!xYfG1@|@|_ z&AZzBgwIT0iEK1hu+MOmZszx0HbSQL-R%3tC*RxX)zj;aC+QgvHfyO{4BSVBE}tbw z#R;Md0)oGvJBAa$z75^AT2w~IQIG9fYgfxNQ=IXjUZ-86k*OalV-*)`!)mS|M=vYa zm)3MG{rLIy2jhp!cPb|!CndXo_T8^pU&t@pJ_miC@M%)k zoy=94tuv>6obVz4eLkGNkG?yeu{^_>5%z9n#((Ll>5VhSWT?|Wq*ED5A68`b{IV=( zF940J3U?N(OXinVmp`tMRQ{-ZP~Ez2jxt>Bsy(OgX=-FyX6<9|<;-H#v2XJ%P0dB5d|mSr(tV!T^U zZF#n(AT}%ZY#bS%5!W>?Cw5xw{g!uQZnmh5ZW$fl{9RM8CS4jOHef_94Eq)QZ(v{l zUVig@^SlpwE%zMl5$&#b-R_bt`6J2}OLvl#|3g#4F)%74`>9c8^O_w_^8cp3A-bypMQC`MmIn z_1)_$kfqCZ_;KZda=u*a_u8+U-%?qMue+narCK7)&x{k%M%b?n(A-e9Q@*V0T3c9s zs%k-HrwVDgqV!kEv*N8q-3wg`s`Jv36V4+ixaLm!dH09od*|=lzWvN;oU z@n7zL9tjBRu}_hoUS}=KYM*7vJdxQvGwcrkrrF2)>=agSNvLeSz|G$T5?`Lbq?rdOKwYWlHBn!^|g4LeplW(74`^qj%L$LYAGY6>9Hk)@l}mU74!+tg&kvYsY90X{)qjb!y#F z{RG2s<0{iFbGD_xT4!^0^q|%_Ul5Uur_8BrlDmpm$Zsf26vc@-IOY72mbz59D&52$ zah?fYCg=nG_Kx#e?2`nApe&zY-&wx9p^vc5ccyQU?|C1-&jRmauijo~JnK9XJhr+& zaC__e*yX(RUrCtwzOc8z&U?>2$(h6M#0q3O&~54CeBubT@3oSar6!xG>+i3YuRht5Z0%Rm z*QVK%vS(*+$d>0!`$qmq`sw>i`e$MO!l<$FLhj_&g5r3 zYj;u~iQWuPR%gx-o{2wK7%2{vR=Xs+t#$w9G1%*b_eGyKzKMQ*{_g|Y1-%O189Frl zX+&D&-KdNPpBnCM6xlel@xCUBO+PgaXg09f%w~O?shZAeTGXUZll_fvG#z}dS7ucou6Zr7U zxmP(8*&@~*#u$=K+=NrPrB{8zPfl+VovKSbfNkM z`cnNg>?Zpg-xvp))TX`Wme2}#V?AeEYVYlcqIl@CFMuNPA2N+`k-49h%>Kv`!e22= zz!laA--_0WLnU7&=cJol2D!4_uDJDb&w{)AXpbb12jJR&_ju!R$z!EQoJX$vJXj8_ zb<1@PbY0}~TiQ}OQ?g(DQ`A}h%0a_sLe0Wr}AA2 zI0fc>N4|SOSV5zLW(BbYEeo3BF>?yO7K|$t6%`cyF1D1&%bJ$AsAyH$uWC{C#TtF> zY=uPiM!f=FnvV_rOoiqd)^ghzM-{aN3W>Xz`K;!g{oGO>TVNF^gcgxhB9ZE)_*1Sm zZiVg-J@$Dn^%~;s>67lW#dj3e+HZc#x1+5DD6r>Mw3GNWQD0p4) znBYdi;^3N~{m`En68IQ9i8=lU<^TB=%G_nGe0%uxh5znhU+N_P^*5wMB(05NtY2*4`IhSXxxy^K8|qz;o@vsOB%RUqD|*3WvjU&>k_DUNNr`e=h$F|2h8){}(@x|A~K%zlxuTnpQPD z_y+MD+-uz7@ZQ_Z@!{NM_lFnj0(hyN0^2oa*$| zYQ5@`YMe@@Du4^;B4s`C8lZl!?u~pr zSevcwuDh)Z)F0P31Z#S}F~F2<+KQR?-7?EcS$AWdzG3fyp2`kj3Lc~1mPvFaufs`i z79*3ERDX&&-MDAqgg1b9i|5H-&9CGS7Q7NP1IwKhO%y#A@x?>M zXT=I}oMbKh5+%~^(pk`iIw8FYzr}mfE7J40&Pex3mr6%T+e+P~`I6I;aS}iHC~p^c z7mLNeM5je#MgFKsrU)kq<-*^B^Mb{K?gB5IS|9MY@rR*?pn+TFW_V1ay&f<;@0PV6Hvg;s$v5D%;|G|+)v+KxVP z7qA-*)H~`pI)rV&ht5K8avXT{HHgSUdz{?>6|;HJL!vqKv~VqN(cNfg zwwNB6{x!vzbnsYQVvI3r4G&PAPBcidHXqkd(?{#gx)-{=x+%KWIssPdJK9~^>Do?O zKdn_$ph?%<)ST4p)~wUa(F}zGMpI3Q##6)9IMg;Zp$FXEi*-*>G4BJ#z)F2*FR@ANE@=!8?oN{Kl0K5bl3|j8k{)p6jDW|l zMO-TW0AJ26;&I~EVsEiV^hR`8G)vS)(AIsuh7W9{nB8KF&hU08S$gms8FD%)SQ}|62Am zWYq7F!VO;%vxqX^9wXL?lLc+zq1uSu9KOAG5%U$ z^of`z;CQka4;kmtJ6#12yTRy}gaf0fAU~sGypNoZ&Uzy#FcpFux&z3a7$Dfc0lhyT z+-K66fe!NkAhN!Kqu$RUbiB2%fzP7Wb`_j>FWUz=7`Cx$EGd>@7(I{B&kBI9(ppn< zQ!%<4U7?k51HQ1~h7$cL{b;?f{GtSrFCfA zL4ESQ&I=>-2`aWT4X+J>(08aX_A%Wx$<15MCi65)uB8XGWg6N}!TEKY-C%b!goPmY^q=LL0gH-s03$Ar6t8_*${DI6y3C~P3~6f#j!%@@2A+!h=YtP@NT^o9elM4;h+=Re{n zV~rWd@5B%0ll&syE8Yd(M&3WX4!mG^OP6yqxK}Z%7H|i0qq&}38#3!_&Uwxj&K&e@ zTSKXZL>~J9htdP=rR>q{1h$;*U=^`mvChG%d@QRi%a>)qXu67?;W%bXWUXS1o!yLy za1#_T^2rY0z2gXZA4vfuHM4PTs_ASk4oe@!!LuI zv9EC_TxPvZV@>Bx3Uqh3n}4E5xWw`SU6)1H57sbf6&2Xp*bmzcKy5rlXMR1<9-V<5 zA%KT^K?I{GT?CJf6SyNcgSzb`)c*SqC}4s_5C3r`A9W900I+xQOQ 
zX7~fo6aFI{gz?iv=qqFhs|26XZ#^N{AebiTBWNyg6KLxlq7U#FAQv@3H@AZK9DjK! zZwRj$Pk_p8I`<-XGj{^&_dZbf$cB>3Zq97<>l<(coH}&oufiX829N=b*+Ooc@G z4#D|$2rCx;@;YdFTtmlZ9&-S*In#xy!CH0~vuFX7N?I~J7 zZ##jGPB)vc?f>KIEx_A2zOP}em@JcQnVH!EhdF7OnK@0;q@jkBHqbCP%#3ZAnVD@% zGBdNqxcbgW&A-3*%RI4YwVIv1bLY;?x#!eCHFdXXim5r6f(oESycxTtEx;C4WVmD4 zW|#^N7*|8N{+a%Weu;jtK2a~$*MR-yZ`~H`y$;kh2j>f=tJHqMO7tG>@9-K!wH>hH zABfS;s;Si!XcU?+u)1fO`{1xWr#XXi8eFzVuupeEa}Dplf#p42%!)C~Hx>E=l2 z*```z&~vh^$w1oW0BN`hS#zzB`CrZG2K1W|ILNcGsovNL)v&tb4io~du>@Ad=k`H# zn$DH+MnC~LmFJ2GbrT|*dVYYQk6@|b45CvFb^u!mM`Pdq5WLhoVFCOUU*stY7R8BD zpfBA;)J^mgN*CB$BHopX1W=r=6=nkIoCKFppe{x{wxpP+r}!L%Z4;~R1}P9p|d1l(y4Dj7&q z4%G-I^>^SjI062Gg+Ru31@6rYSkEfp?H?dxY%8!)qk&C}0DD6NkTVaEX|Wo5w%xGC z&xe=#2%X{W);ZSx*e8|%hxHLGbQ>(wEL|}>u$nW$5xEQ4ke|RQX)-BHH%&WCGfZ7f z{w9Ml-FOwYHr3b>?1N@758g5CG0Zm%09wQqXpU6<4gGHYV)&0P`bey8sGwH;3i{Uv zbZfy_HbK`<*H#y+^V2!&*g6vs2Bq*ipS7>C5Aql~*iSHay~peCU?D2f)zt z_JBv6uiJ+6r0R4!Uz~f6{wSD+XnhiPt`8Yf4Ps*t@EpDaOWr_e3YVIa%`33m=?;dq z+ZF+0z#CQ$&}$d5)7BR_N3E@o9b?xI?P>t?`*A4vbU_SJ0c6k`C_qa&W3bO&!AamQ zz@5}`n_^t~2P{D{es4rESNXYofuJck;MNMx30`3YWC(*W1`dUlED>(P$aPkD8M}0M zfe5(=`?)T>2piff+$>xHbjV<++D8jrg=$#RJ;71IAA+%hmIA4uf&YPjn!g-gDW1=P zR{ah5q!G}oXYsP&k=B7>C>ill9-^b|h>bcRGEyTNzl?EuDlkKSY!fRJ7=gpAl^DM| z058H}RU@nC4)Z87e11ht-GLc}{5Ug|cfV5iaoyXI`85rA$BtASW{+%GR6a5#9sonR z7YOB9@K9ZWYV`*;P!BA3DzaSv210f{5YOW==A{7n$_Hb8CjAVky{$m352M>)f0IL3 zLtFd~c(zvpf7HnuYUKby_6D=f)s}H!D)q*WbSh9^+prtm9c)$%b1veN-KN>VBm@G1 z1pNQsY%2gjkopn!E9FsuM0X$M0H7-wsNB)Y9Xq+f%vaxfUs;-RrF1h-XJCG1eTQz7ZjJ7D-5;>LKk@8Oc)tC*(?Bmh$FU{gOr!MP*put4pQ8U= ze+)jVOfNPx2XECD@Y_}y{EdTPQP04{m|~g-P3#I_P^OsAgWsqbuoN#XVlcR!vQ}H0 z0fqDl`3=*6o~40bKMbsAGTQbv<$}5HIm~iegRAN*%ac8teH>mZgfo$I0*Egk?qKdF z?lZ2A8_$~z{KrS0i5I~iz+cKg4zE(m=Lo_uGLFKiwO()lI3LXls2~G;O3Vwr4{v=2e$bc{^;J?CLd<}mrzYX6T>jvMzbhU;zhS!|u z#482zX)ngOPFxwcj`J4vSqe;AG%#t|>|5Y1n-1Roa5jrw2-MV3SkzcpR49wbs+@w7A32)*ae$Ui#@-2Fo2wG9SGQWAKOoPi6k0Ta_(c&U>>oBocdX&iFJ z+aTM*4{H@>AXamLXMF~=^$B3GS0VBq4-9x);8bN84YkOUNQ36pHF&OV;BWsKD9_el zi4p;qm=ATb8`dM%^}sm|wzjkez{;vDpDov+^1aM57Ko=13)@m^erZ09@ns79SR@#3 zD@^Z97fc)B$$EqP*wv&mrWtP;4;WV(CmFjN`%1FFTFx zf*I6y&PNWH+kv|jv#bhWpnCCEg4-;Q$LBZUkK=FT{{yd60eng%_LN6}+isO$FJ^7` z1g{031>aH95SL{Ma2C{|T|U zg_n*HUTREk%+(RDFyYIIzw#%Zg(Wr2^eiDtg8~U-WNcA9R%yg9L8wm zw>C%afdrOSfxQ2x$k*7145)c_f7Jq+HtxusP(g>}6Y^6|A<|j~n;HVlay;U8A+3k{ z`)6QHPh;h5xplgAsI{Xt%Ib_9zkJ|ZufVcaTBc$xtr?h=DNDKe6V?(AfCFu+xi`=> zzGj=L$n?r|5t{Z3Oe2A>j0114#aN8U`MU8SX79fkhk~m$$|wa5L%HEAe9U!D7KAL~zXnKA7)S@W1eH@b~b4!}!>Q z?~G{c4bXfmF+w)wNqFV(qX&V|?amDZ3O60Iy!D)MoD`0PQ_6nAJ^{7qOKKR+$vY z)H(v@p2^7BiUrzO4W`0J;NRN-9O6(QsY4LAHzI<4VZDe|gQdvX>uXH`23};<+HL5% z<*;R)orSNPB^lT}KGqX*5vAM&Q~L(1v zFv3Q_qPQjKETOpDffph+jwXxI2JH}7O zGB6O!;4yo_b8dv6dSNOwvEWgAn|}pf_kpR zC$yH11K#{7+D`&Dh?Q8o(>kFuHP#tejcNjnywPq^|5$cljbW6f3)lr*!RS~3 zYr10I2d^~UJiy%290C?`wJ95Z>ALBdX_M(U;7R&nnJz>wWnu>H-?Y!YH5Zn zLn+WJCfJkM=nZ>n1KS*BoMl{P+y@rW=ZNsCjcij8*sLd-{s6!ELsPDaX^t@W0$cqd zFyK}|0WS%-<;9kx$PA#7oxKRTuQ^sR-43g|2k1An2}(zE;N6RnojDxrQD2~lG#JV< zxlpK?2nCvKWF~ik0@zC^B=kiz^NsDznasHWgrzU;<|^1x0oN1jM4Ne!fT?rE3evCq z{rtQ9@30T1zzZzDZ3KM<69sb+Pm!_=qv9&T62WZ2RKWxwhkL;%1qoc?m8#&CZX#aW zi51pKm<2{b8Ksez#=DJpZaHrxuNAO&?mR0ugL|F3hx;dY9@e4~FcPXba?TCTR?Zx( zs<+`tIhE|Uh~#$y{ofsN7sbwmJsoDPVNF7of{bOw8u=^CQPv>)WHK`N-I&$T1^A4- zl#M{Wc0p#CH`oxWkk|7NxkF2kJva>ce38g`EW`T3C1ifgMP^oSWKacQ1gyaLcN6Mi zn~>4m8TsEb@XdZlp1@y-GiF+wTV;sZV-Zii!Hn>pzwx;?G(hS>Botf|dLYc0HIls?2iJ+JvT{Yj_T;rP*K%eT7+L9xN*lc>>?{ikm{MFsm8`Mg*lg39Ar0J?zt~sf>srjaH*Dk=EvAJ#|V*612N`0xm4{{7lhH=Jh z;}pyX$0Pq_EYvr9q91jotLe?ii2NBXHk3-m{4XCmc^xn}_u;JKFfiA$a)09)dF%Kr 
z-2E!SbRhjxumW{YBoQwH*1N0I5vK~LU`b!eV##?)s-zsHR8j#(;X=t*$ur44;P7@! zev=HBw3h@+3{J0{wm1!P@&!WpvUq{Gskj;|pVLK=BBgM#P$qnhU7sXD7It7-@@ru? zt$-w)4kp=)uni~93-(-in0Krdh>U9CRmNgfz#SP#1<f_C(+11pUiyoYiNcn#I9Ux6T|cdaqA{<`X(5)=Yvbs~z{aA6>kaE01~s@hWY%A=-&H@eKDoZ3?pEEZx?Xj>y8FP&^r%h5E?zMZ z!>en4tr=MpQ&U}ixB4W?nd(c`Z>t40Gi$PIM%Nl^?|_lKYXjZzy77|glKQcxSQ~>q za+`6i`IIH!8V+r^kCc=(mVK8K$2-p#3A;cYq`y;dNq6V=F7d8XH=WyCV2F7h3q0O? zIC~ECJOsY+(X!jJYN#w#$qHqq_P-xx4`h#J-(>l+Y}p6dOIez%My8g@!B9U@mMk-S z{_WY%)8g^QRBiygLaoxta-f-c%40YZmd4lYLaPd&lRN(}{RQ^`p z7tGAYb1txTtRAfM$j^TQK42TNn>!(Yttoa+$6=OEGgd)wt`#!RvYCT`72nGq#gTH) za3#E-5rL)R{|ZD%IlO87C;WHtZ}0gkz6pO<@%!?dVoqq{Rl~}8cr3;aWKWEr@A(h- zxB12VXnTIdIe}5o7i$(fv3~IxE3!v~J%zqt!fp#e; zyDIeMrfEPVcG_feKsI2gA;kLp#g>wt{7Je=?70QYdi~1MM zDq3B1s)$qkrC47wxa@klxgw^jb#-iwQ!S^ib^W7;jjAo0>$-e{(Ntr(1k{~7Qv?ja zQ2r$0Sn(i9a~F;4R(HO~EYD{$AMedR6}|?)YyN`+q5}hiih_CvZw;;so)z*qTEE93^WB(}Q{oR*0@vh9%cjgNU!U zVX^)x_A4JEljfynoo2h{v8F~7teuB-G(TNWtbC|+oAmyMqlPey_d8A9%*EzSmO<8N zx(wJMcjWaB$C~{#CI_QVe|W)QWU={SykcQJ^H=^XWIp8y`-?t_`ijqr--#Q!7rU-^-RpYV^|6_$&4JImabNN;#v^_uBp@!ja}9e61yCgfzOYxuZ`8Id7Tr=l3qt)ja`M@G9u z2S@jbo*3OTS{kj0S{LOMwIH%2VrfKJ#Ix`&;cvr6hgF4+2t5+=J(wTdE@({Pw17GO ztNnKQ?(|vXJqHL8hUa4Gb2o!agmZ7FpGA8G6};ixXKW#>GxeM8koAiBol$BytsAa& z(`2jOHD)$6*7NIA>W%>ekyzWdb`DU6E$bfC^{&^{KWy0DI7c;9JzBF-`$;D@bcby; znnzd<(O&66|mj^lHVEM{XX-& z*4r}C$8Q`q0Y{nW=g!@S10R(}X6!&9nOjVJ4!>K@nJu6|kd ztx{dlyCSXp*K%q3%d!P!9%VO6$CY}NzAKqs;#~5m_?Kc)@jfUCZ79?hj4HU5FV3Hu z_cYf#cTtWqJ0*KVmIC&0Fr!h~O1Vc7rYM$QkS~-^k}s04m#>tskROr1l)sn1m9rH) z6`{)I%6rQAjPi_KnTpJ>nI)M$vz})OvO8vHWZQC9LRIce;nL!1r2*xKE8?qsYWme} zZ>UtA)0P-UTK3xJGDA3OUa9bn(<+x1?tz|(-idyyfbqfeLzBY)i4aDOi#`|gH8wY{ zAU-YOM52FE_oU`YPDz=G+Ys!^k*GudDfJZ%1-?2g0aIJB->VrB( zyHYn#-@`D{c*OL<{LFIKS`Iz%-#{l;$E+(fAqC)rJyDP_5)>h1_FqJPae_Zyy zG_!aohQ#siJi?MhVSg4&?;$wZg8F9M_H>eY`sQ zjtQ6?tPEWpek@{F*&*o6X`K!TNqy{_Db2<_0x&1N2-vFM3B~*Vf zC@NW(*ylMnxEFYT@lOk!L|?==C0ks&x$&fwXD_ciK2|?wU`f!okQ-sGBN8JoM~b7i zMQi*6A;Ici{Jb$C#iG(;Q}7LeeVB zveMMa=wY~_>#gmqIi>bek5O$x{jWCs)iAojr6C>J7NhIG*9F!645gz?C2Ms#odea1vm3IajwOlxLhR$|t-%n_M>nUc(J8C^2&E7vJkE1N6V zDoW+q@+2r0ZkK;eKb}s(-{i>GD;ku+Sr2pO=6e^NEuqUgRlcl#P*>AfsyU*+WLi#N zWH#eI;x`KKIeEA?l`i*e?G@}Z-|uEXk06iW4Z*%4#UYzQpM;(WJsIj9RugtLTpH0O z;_rwBksYG8L=B7zj#?WvIyy4uQcPHEVeIF)o$;3Vu!Q)8hy?cpDxog^OnhP7rP%&4 zlIRPOH^Re1R|Rzr_{mS;GfS2xJ?VDa<&tEw*js4j)pH)P)>Ef!Eoi>=oB1YGiB4ms ze4Jsoz7y2Le!<^p{cQaw{aftDoYakmmPiy7Qa58S&q>GA{jFVsTw*SE`&8OQ>;|6# zrho~o)!#;;IS|Z;ztX=lRxp|Drktcy{Qykk7#h!QF#O0%rJ6_WSPZ=6llnu8irK zF8%Dj0HceW^AV>+@lN4;z6WnVGV^knE2u!mP1@Po);!zzUf)%BOB1NR*f_dDTz|5* zWsRY#pz?FY!}83sj%7~}Q|gO3#fys~iY^vb7j!S+6qM$#%dg2>l-Dg!mUl0AaBg6( zHRnu@G-r4AknFhZPgxzZ@-tI2?`Otl4$Vkc<|?(yV@ioKOW~&+tc+HcDykGQ%7M!M z%E`*>O0$xYQG)+NGk(j6#L=9LKQmTkTvf^yUlc=?LzQzC=j0KJa^?F>x9pxd zAwD6kLt2Gy59=2GQ+WUI%J7#FT_a~l$|KXFsF>R^*)h79C9#QdcjAQcTjD1t^h``o zWF-Y8bxul7dXOki{3F3Rp-ued*pt!wBF~2}56uaZ1)T6*<2_84DwVmda&G3t6Dj!j zxu4iJW&qXJcHX+)a>jh!^cWhtR_w}_V}&~mtJ*ST7r)YP)6a*UyoH9$W$kb%Mh?_G zQ7^GB#E=Hd8G^7Wr*eA9V!Oa(ig`y7l_)#x3T-)(*D)lq)-cdz&{*uu|kI zIp|X87U6M5=Ig`pYvUgmFgWl=P+9P$5KkyGz6%Wq>l)@Cb|KUw^k~R{5OeUT;7LIv z1NQ|)2Ym5Q_V4XC%y+HN3-4mDd|9q1)3b-idg)&GIc{ODubc-;o{A-+v4YpUR@_bO z-OOc-dGr>`RntpDn$D<6QU8Ilr)k}Tnu*oHRou$ZiuvWKWg%sYOHCzrOV*ckEn$`9 z7C$XsS?pbWuV_XQujr3LO~KZJ#DdfLMR^f?a?kZ(BdP-}nGt(6OL)LBT}XD#Zg2UYXU`I@q$>d;n@j zv|*s(7kyiui#A#_R^3W$cQCs(+0vnKzC7)SETev>o+r zjbY~7mOk_p##81g_IFM>_YeLM;cd}6@k}SVWUR|D*JrLh-DbMKklym><=IMB?)B8? 
[base85-encoded binary patch data omitted — not human-readable]
zRrRUaRXeG!tnSZxyM~m8U5zuE7B#PI86o#l#3;Y19&4WKvJGA7LX!j=%ah^tK({I) z$MNk1w}e`;oAkA1i1l?FmF+RR8+PaI^6aMAw{@5=!<-&Ew{nefBi-M)kMnr$k?B$7 zQR|WHG1B9TyWVZGTdM15*B-9VT(?3zZnj5n&&QraJ^eiPxK+A@IWKZ-a**4vv2SNz zVzRn+luf$+Hzdl>1ey)GIZewNJIax(r>S{fOH$7Y?)9Gf9F0YH*NfyX}swwtYI zS(jT)uu@pwwp?!+WBFM+S2{=Z@ZA#L98QI5!)bVy9*!9gO~@5%s$|hcjB#pe5n@LDfUodb{D!5JC3D7oz~-! zU}?wXLp;>T^g_Bqg(pjN2(lS!gleIN*mPj%KcM&0J!os12T-h9<7?v~<1AyK@dXu5 zy*FGj`~)qz57j=l>b1bER%>r-pF`Xm)!l{KE3umS>IPMbDo%Y=eO0|&Jzl*VxcV9D zZR&6}sjgEU1Izdhxb!Znc$Evl$ZS*zs^O|Rs;eqQ9jA_jV;WTkl|3-~C)I^& zE2!f#O0!aPT2rPOrB!J+LS2av{Ul&|zt9T}n;}NKpVCkhp{nQ%sM6DsxzDr%1WN$0 zzcP@XkVB8c_G2`*9}moI9vQ&yi2@Hyg|HCJQha8AZid506%)4XrpKge5Q(G zMOGq(FhzJuI7&ze&kDi>pZM$fVYM6~>Vaw>wMflVi`5?LcIvU}wd!;5O%YH){h^M|2H?KG)}%uJYc$r{Hrla( zuXzYOdI!MJEZ5!AHA5xLWBNvYv|+g+5qQmQ0Tpu)*wSr`|3KZl5Rj=1I-J=KwVdOC znVI^l_V#cj||9gt`W8rZx#`6MTjm z0WDg-PO8InP1-_j22?Ca*1pl+gPH|*Aj_lzTQv~SPAj4JSL=>~-SX2f*T2(ah6I?q zt{O@WZh$d5LZt#KX)y4^ON~)byGp`r0<1(^$T(hyF)#qK>jYGeI)LJUJ9&jMm@h8| zszR*>RERS^6Tc1fjyEv^Y|9(MK!gE~V>x-6OeF>UHvED78T|9*;i;w8|O$Ej}BtK#zGtd8eV?YY=d(&!fWt%DD_s zMxA+&q3)$4<^jC$t0<0+hkPmm+7phRM#rO@kgcX1sKw|Dc!hMRn6R74W>T1COfl_4 zPc^;+Jedvf^e2I&js_&dcEFF_(?#ej^-~NB3<}Vqi`u7vA6u;bONZ%;b!~Mcw6isL z)zj4%)jpa9n!cK1&=QEBsw-8=swP#Sx|_O}da*iJ9j39<>;he?P<2ulsMl-kwCl77 zv8ji!hFFg z!CFCAr~?!QHAB`xMTc3UbD|5Pc+q&_VgV}X0`(rMg?B}*#4_Dz!O5COJ5M!!>cLYocQxYe3nnsu5!ng;b4^?6mTa+b2Uaw}9~nWCDh z%29q)9F>2CT3$@cDS3!uo8q?Omg2Z#i(&<=js`0t6>bV|MIXgUg^Ti>vRPT7ysvzs z3{`1VZ`GqT4>SduDos6Dv{l-8U3>8UdQ zEs87xr1zygEH7F=()v|QRG#lg>dA{`D#pSK9KI3Tf=WQ+5} zIqn@JxSNTDd8C9M2y2UlR5ny<>Spi*G-aB>ohmhi8^-Dn>$0_e z+RK_5nr<*VtXB)v#VRM58=k8FQr}ZOP^LqjCMQKLWOLffz2)8HLm&n)qUCh6sQF^k zf~Jj4IZd6LS2WLTj%Y@j%bMOdoo?FQbgN0#)T{Y;v%dMymM;JU7D4r$c==lSPkEr? zfWk(3SSf_)cA9F1dVyxAb`9iQ55k|dDf=XQ12vfZWqTb=_S@_&?U&lUv9+|F3-Kbc^%N_q zmCUNWRfJWcPvJS8XueVIT$fQok*(F|4f{truM0o7L8eSPA3 z;{hTNAPEjt>hA77b+@Uz+q6@6H|p)wrtUIz#UVgQ0>s^2_|E@b-^!w$)zYrceePrD z?ETvdu(g;x%v|&a)bhBUO-5iI4T5!NM6?7nq3wWJR}4&#)1c_229E?<0?nwW|Gtmv zTjovlX2aR26lg*z-m#u-?n|y4&N~h*aOv9H`F6xEw70c4gXcNX_6W3Uh^3`@w<*c= z#W>3N%)m07(l6Iv)6)%q8wkci<9HLzyxKg^95l@_*^C#AYm9r1LE~rBC-XDQS}Wi7 z#5UZnv`==}9WR}qTq<{|XQj8+o9Wx(3;WIj>8dO^CyWQ;_yf?RZG?1${pc1L0k#Wn z6TXblk#vo0q&TRbkX^JvbS9&Mae_I8C1KaIZ*tCaf5wv&GkHw@SAI{y62T0?Xu%@E zMZph&U63tYDZC;)E}SbID_k#pA}kkv6&4C73hMcKjiGT@fI_h) zmIP1xLNqVBDsnh{H>3}C3*Ha357he${qz0Z{K0o$b?<-FS%Imbj~&QSypG} zzjP7pC^UQZBs^&e(FtdztDrG^k3EC=6Wtg3yg5x{V;`auqMpdch!6J8+o7+)UjYp$ z0RM(HW3xBSTkpB*83X6~m#|9rbt|D#^%|b?B3LKh+Kko~)^nCtP=k+{Q5LOvySc$6 zF)cNIFt~x=X4O))eKlj%ti}iRmsN?XdgV)Hx$>%NOv4EESIr(RQ+G;tP&Z1OukPKj z92A(k`cVxd8`Ct|x>fpOy-k14P-&!^<1OQ@EPI(F;9TUo?9u^)ag{sfPWM`T#=zUq z+z2+dvFT`BZ&VP)1Je6MdVw>2HIJdd7`2LA5-W5To zq}R!rDJxP}q%p;##UsTe@shODsjpIyltR&VQHkhy%7xUzv^iqCI8Wje=Zk%*M^l=K zYLd~+&u=-8o>xRSi zeW7--xo(`iqPDGkaoqsrwfc$bp!So#nITWl)DCLwq1syavG!1{RX$xgwqcA0(F=@y zOfOA1^9}Q3OU$|$3`4VAe7C}t;M(Ba>dbL1b8q%84LHIrnvS3l3=aDd+Z)uBBk->Y zMG!h^MfTF&3=u2Dy1`z=S;KuEk525s8_ORl=q~J%G%xw5Xjp1lnnaQ$y)1noogyuk zWJwN*#bSRNN!(F9SG-UBSuB!l0nVjb;*ltUc{xE`kxEbLnY>qs5tQ-Bykm)76O{>* z5}w9uxCTx!`w{Ch^Auw}y&dg4HJ`GA^oGzLPr&8C8{Hd2!+e1={bW=xpzr>S-iUk- z=ZAm89fk+0uAaf?0ZgEUf4=XRcLG%PE_wQUtnPKdD;(hZ=$z(cIA1wN1I4nyKHS#X znqtW?|7nT)XM^Od)1FI11OzEvZso1|)JOxImAv@&tc^=6YL-?qapbMRa^cL(ns72JLC zHxjNT9^f74D+K+Mo+oFeJV`uQYO_%y4IB9q3MX5uIPghBAN!LmHOOvDt(iT#-bfP4j);@KzXm65A zaFk!eJHy+`YsK4{_*X)=_-~vp>~iKKMj1UyBhpmV)szpUAB1vT7S;)@tgXPGGJ!Xz zRoon)buWnSiade(TW)B3Pz!tE2LBgdbKhI8X< zJ9j(ecAC9|ZG*MZGRE@LJlsq%TTG8k>83x7#|#nuLj7gk8SMehb@jW(V+~35r(btRkvYbXt9T13}+aGpK${-OCzgjiu(C+@w&{+tu+pqpq{Q 
zt3hn)ZW(DSb(FeC`$Ya0zE_^#?xpUb?)&aZ-lKt~k(W&zGz%*Mb6k5;GIZp1NGjmCi^n$T*O(Frzj-DZNm7 zUK*8dPp4*V&bXd2G-FfxQt5KZGx2=!QL#hJhWA~Tb}KbMWnOZju)AOjU%zRsk1|fv8))gY{>WHrH%cjqNX*3_$Bx1DLyt$D2Y>LPCUuM%>kD2WKhTI* zhF${ubYbAO-v~8lo43$A+)MMSJimZH;(rN1OS#UoOgptc<>*ul@@Kj76SzDSrE|C`~dfvQKBP%GPF8WKPW}kuHR%eNB8# zVwT=W-hT*){}F9A=%SD**4Cwqx` zg!Q;YY$do8`#>Vpix>rTs_P?hksIM=;Hvo+Bm~<9#`(ASD!kKR1wnbez{$Ph$@J`V zM_t=pXxDn6tY$c~oaK(zj%)UIux4Mi{d&g&%5>#`x?43%WYNl56=`KFOInq#skm4@sV=TQpqA>->2;b{_01Gj zwWykFGNVS5n;kPkn+GbthjCt<)$o^5_0nCFAb|3F@pD?Hh$Hx}N zZN}Wh-zQ!mm6Jut4SEZvm5FA5=gf&8ozN{2#h)eIo@^C`Qp~9v)8?m<)8f-CY5gR( zrNQ*Eng3>;%r4Ja3j16VSuzESV-Ii^q#2;+c~ElILPY z+NabxDaFajNgD({-rtG&30m$Ac-q}ry_oYDyXo6$^N_*RRLVzEJK|&fB%Bl*K&ye{ zz6=QO1DcM&4ZTbBINTNkp=O~SL3N;0;Iuy;YPjot-F%3z!TT6)<3jHZsG@I!r+yT4 zmal+r`^C}EQE2~Z^IAJt4_FlDK4zoomZ`mICAe&^>lHepj;C#_IjL@;{?JHlbk`qL zDHW8u1+|=-ZL-#tFUy9Pjw^jr`o461+1tt<@?rG{8#greY~0>BrlFG}p}M@%COa*k zpxCD9s#>BB>3I6R+WESdrbYH*XN3#zogL^G5(a-leM=VD5Plx{9DNm+jOz_GrexAR za$Cd(U6IR-8dh&^Lp+jL#akeBC;vzhrVSMjk{ptdC1b_u;zQytk}T=P^nIBfvJp@_ zjL6vv9a~;bRW>$fPR^PfkRoIiLeF-(Q~@jY;JNN^6 z6B1`842;j@@;PMo59S0$l;%RDNOvlS@|E-#u?68G81+8@J8}`~ew-K561q3Njd>u8 zg$~tqsFQ|yP=&e}ycjqE>-Tpb7MQSu{5^mR>w|vOJ1@bz$P;#7caH!y-Vf(>$8~$9 z%>jh!36@@#ewG?D$=u&`#<Tsi|aYMuP`cA6N3TB;9&ZwPU-J@z) z#ig=KC7p{-{$l)oS-h;gvy3EvsvO;*g?kN3xm@0}#x4_77uTfKc@&5WUq8IzT;pHr z2y}c+l&{P)maze-5#55$a>Fu-&ayasUBHdS%*~>*N<#GtUjW-q8*}l z8~?B>9JQd(Yw&ZyDf|WM?9IZ*qOW1cEX0o{Wm5m4EKQnh{zRK(hyW;rlU)e)*p5{Eu>5#KF`%~8VtQlFRtmoMqau~VO zb8~aaIrOZCbg5J>ev_t7y_?ER8=uyg`YC0js7>-s;R}9w;IQ{p=Pw9djS#-Ot~deveL8H=Zwt_=cMPn z%bt?$%iNzn18$SrG$id~%3IMNq72cAy%g>FW|rOz$`MHrG=p!5Oi?vwZ@M z>2QnGTnzV~efk!(MW~uYzxs@7I4^_)RR0mf6dWl)Wx5tvptBymCmTMrM>JDP@Xk#khK$rkBp9>u6YL z#@i!~LCzxQXZJ4O+dvm^s%A9ZMxVn~6X@gz)LrzMtf8QdUc@8vPx5aFP77ti#H6#y zLsLJB!;BRKjQnX}n+Ow49@Rr+!Ucm-IH-Vl%mVbqJH*rY96)utLH>_<=~)S^d1T~LdHdp)`fx;Od}x)Ww2d^SUt)fmV> zz@pazd4PdZ0_pSrC=LPO&~}gAjiRDRBq#hJG%~ay*d@@>|BsLCgA)#r-&?s)xmG&+ zI_B7S+d5g#nNv-EsQKmSCTS`fA3+U;uiC2^T{l^tSUbM@d{u`^eR-dTjy`IZ_;mpZ}uPSf3GcGUy@QvD@!OpTmG&5W<_n)shVT*7j<2guj^CQOEf#Q zIKxBp8rx*M*G_WzJ+FL3zuI3D`VyOrDnJjzOu>&QH&GL54fGFeYr;!VD-Gl22}UFZ zlNyuKM3K}}lEc!S(t2rq`jZSpMl_=^^Hdfi``;{V*1wtOGiPVE$-J6*H)~V&tnBMq zj?9^vCo;yR$HN)srC2CtrCm#*h_)qV2nhlne=P4!Vs7Gc@CWB}W$Xj2S4gm*_dIL^q`ho}q%{pV+}PN0 z*iG18*mszI;6sdnKA{M0N4LWC!C29w(LW)*=yu$TrjNku9u}Ph<3J4+1UCe(`5(X< zl;vFy*)5w~Ge8G8$==-7U^!!60k@=xevhuBHcOMDu4|ZGAFpC5g^FtVa9HtQ%l21M zD<_w`N=+rY;!Q>7U)4W1{2cbv@T261`6sTZy7=Fc$))x(Zw0wZU5TzLl6|Uep;)af zQ|)P_=r$VK8Q&X!oBg(V&iSq}?j>G9z!dxz@`oj{GjaRSt+B)K)kGHMF?BNSA4W4a zz!I-VYJZMo`Y$H$pjUG2xCBBg+6d9sCiJRNsxCu1xBL@avIh| z9){nAZU#>W?)r;;mEIRH7F%3XoT(0#?UnT*c)I^I4K$86EY?rbv9%@Y@4`OP@dF5G%00pY9w`0T1DD)F->w!f|d4= zE|$)cn&9-AD}Ir7BW*97BsT*N^hAnPBul=S^h(%U*dS;tn8AO;8_lZ(ARskiT)dmR zpR4B3I6c|BSPz&_7$50W`YPIcqz?LYqacCBNs1?}B1(yMgwaqrC*u#`Fu1u`AEpxM z3wlfyWNK8x=N0Ax<{G9H6M{@a3(OB2jKwp|Cg2<_ge-{m7&r9e3t`9Lp|1h&!4K(H z)VPJvQ#uf94l7kc^l*e9*$sI+>YymN3KEZggO3sC?Fp{9POcPZrlYN0YKvK1X0(}T z3WCc%LqA=&Oglj{MSZ_9xp92M$og5TD&;z*RgtLZQFmHyuEp0PH6yE?vd{2ItS*Ks z1_RFyXYA1s&OCmp7I|IhC`mP zGtxN*Ip=X^)KpAY9Gx(cIFK}z{E8B!Dv)P1KYb(f2kSO_K4&y{YW(zsNr_8%`}o@h zJ%t+KrX*idtK?zHCzDyCg`#01WAe*nGyHdD@}KalE0c#OpGZ0*lnDk1&hj7g?1^U+ zHzhI?(-V^8XK^*0=A5?daV!n<7}LSf)2nDrNFVS)yrl3c7h#7uK_Zd%f==WNa68`O zN8yWbIiQmgg5uyC%z)FF1DGwC5f~KaA4nP)g&qVMW#fTlvjn>CUD09~12J%WhNBmt zH^XNtIs-U0kAV!o3^K{2kYV;DZaq*!jgVg656C=MVMkvFM3xSbOQ83d6eu4}+pD%d*t`*^~fXkb1*>fPo&+Q}wHKKecS_0*zCB zR^3{C4l2lV8c+>8!B4V6wMFGq{aJsrzOH^)L!e=8<3Ek%jT6+pHJ!CAT~ycI@Z1D;oi>7_l`|RoABQ~=Z@#~G;IE)AB#op-8NmNp4LRz&(FZX`>?Hg;f|U4^SWKc( 
zM34e)G>xKD(R!?&u9eNAINhm zpSq0_Aa5qK$hRPyMn+r+sK0}RRKiOjgwXN7L36qS6m9u95B4^83YHK1$rG42moN`8 zFCmqDGiEsSTN5x$3=h;fZ6GZo3&X~E(G}>&kU6&lX3uOG&5n>Wpa;7DI@t3Szy%Kj z@n|@ZlpaBMRvX<6`nKtjTj6MEIS_@mfRlRwta42MEZjgh5~~@|4_eK-%n4`|I!`R&C~VO@pVPomD&GsSW9j1?B5&;rwyEfK2O+7@h3z2a#jdJ zp(1I~WMG3BnqC7TX)LUCaabqT21?Keg!x1s=?iH-8AUlo=|#mOzme@UDg7nA7vmX& z&0NgXG8eEktRC#+Y&~1b>BSk!naWwq+0I$RS8}P!Naisufi_E4g)8B7}mV6 zFp`HbCqc*Kgn6bzC7`FFTf+I+0Oa(wz}PK>G=pRydYuIl)L`g?SwSsghorYJKxo<@ zEDp2?Jn(1vkAt4A#(UR0!&~j)dq%i#LC#pzSq=$4K1UC@pLez^Z4;pGH`pqG`=ZA3 z!Sc%zXBArs)>oE4EiEkp^K0m2)tcK{mRS~9Mp_nG?pca0uPoOsMV1sublYORZ%woP zur0FN?9&|{$0cVUSGjAZo9}UehGdN|({J^E4O|K?4Yh}~A0;G>E{$aa!R%q&8c?-8 zNB4o1{V29A?ky;&E`Vy`6oEn935t;;klM2oysSBtO_WlKfI5r%nwpLrMhr+EZ8_~T zjZN%J4ss=fnuvgR6p10@%Pl7`1!8EZ=V! zwd>x!-a()iUGDL^kGZG1ySZ_oe`p1B?xXX(bFxze3KNf`$g$p$;;`5o>?(VWy%v71 zwBNRGvah$_05wC<9tTd8K8{t6V~)oTpJT9dH{{G+byhmXu4%5#&;xe67P-ytv7Y}t z9ie(g^9_a!p9E0SWe2VV+Jb8FQYZ(wTuG6SkQ0oJZH1K0BfwKT6^Db|k9hQLNTNLq z43&l0T2Mk6tV&I!v7%uAqvESxDXp+ff27oJ|mBi8^|$aG137cBSvZoq%xhN z?xt>{&Y))XuG47n%SO?pb&PU=N+fVyr4jN&-ZK~?~HX$p*@38+H4jDu&wzK49jE{ms1}ls3VnNgBHsot zL`Qjhc)LQcW1VNDXRIgR^By`&%iR}XT(7!9PQ9}ubg7%1Ctz;gcHVT(0UgkI=RW6Z zXQH#(fpSiPahnhMfM=aCXM0ya*Id^_=ywi;_rm2K;W_0g0Zs4K7K6E)G2=58kg|ncNA&K6MGC}8Oi_Hdpx(FzI36O3|hV|tmx&u(j*x32N zB5>T>D^>S5|r$j!1*-P913j^rb~kiJMa7{NA(1PMSw;1=ps>R3qI>Osw-QeYI{ zQ;txkP_kjq`$aBncMG6sBpqg9(C-@qmk*z0i2$w;bIvEthMEoHj zh;bpAvl29^QY;Q~{GNhNQ-{6@`kV!DI(Y&7KnltVDKoF)c0(3HCQuRY0=v)~+Z#)X z{RZaWZBTR%RF5?1kX3na}01NfEr)#vbdD4Ll6=(8^$izISa;33jM5?j(3h*js=ds zj@gd$jw_Bej_HoS9M3@aRpjtGx`H~O(D}+4aE^hI^||ugOWdE_DW2^fzh|Qt1Nv^0 zZ?<3S9~d|a{DzS*FZYBvpv_E$#K95Kn&?nq8uozk><#@*5^51-{H#KI(33D9FzvAK z{x>tDxcPV|=pglkKA=VIM0!Qyl82M`k*mqgV3mDG@lvv3lkrOFl@-y-Q@+xvJ`5GyQ@Cid=(|*XP zeFVzl0a1R`68Rpv9GMy+MD9TTRLd|EGEi$nSHMpZ3H}>g7!(I90y_g;0v`WW|5(4! zZ-;p^!Kd}E1dWanJW3ZlJ3Z52&wS~g=1zA{M_)%fgEF)Oq#2BcjHH<;73g}upbJ0; zECQu|JZ>%U=#qdM_Z^=I=Zp7*6yiGK7b2B3nsfn35K?&FWn?L35~%U-Q{0rE)Sc8v z)EX)QX^jkkx!40)h`dIc5Gu_ND=n4Aqxq0m$VKEH@(kIHv_jl))~$oLVFrxlS{T{6 zR36->pHRkA9FS)=fKmv5^G$L#xg98H)5&v5Uy0v9%e;-0L0U+xCww932x`zka|qY* z>+xIglkg?Lt-FkU1T3bF*g6=~|5GzpVOC&LF?fsz?sLnb8pT6bgT^0^YK#j3yYCZF z7~jHa6bA{nHL=pzKd~i{eccwiZZD(DqthX)j2>MOc?oA85u8vzg*JzJhT=k(f?c2w z7YRsZc!*1&Lyz#_&MD7Q!Dp#_b>wXDZ*io*&E)rA{ z&pUrO-N4;A<6Plf1#9mNXJ2PCCm&QpeVhj&EAbf2=GD#$XDjd*4u-E?uIH||ps9W9 zBD)v5pTk<*#dFPrzmX1aUXCD(5^nm6M(?DjqsU}KwJc?Z-AIXno2rLl9Pnw zY2d-Y!+4yf`~J7Izf;9Wxs<6Ehj=CJf+XW&)S66|g>M z!>Y6yZW`mEy0{;_8?&LxzP0Ia)4(QEtT={l$_FL%`j|PoAG}eypbMQB=@r=l8FWoR z6YT*frTEBK7-tsrK-LGh1$zX`VO@S6D1%w7hlE0jAMscDmisW!>uw9TeJXTd0kn2eNUF>Qh0=ML#S2_5TtzTbSHOO|+SwDd9VlnSVTH8IQb!5oU4C*D zJN%B8&Lz%EKxz36vzhN&<+8aJx)tsg&~rHl9-bcFXWk6oE}zji(;x8f3lQK8=M1h4 z)rTao=2VB5N3hX_(eKd#kS4YVxV(FTI`;t89%z)EpxSW?oAG~KP~2)@W9}!o38O)u zo(CtFAEX5G3bKschH?OQ)O47Em#7XTKh#y!!PIzANZ+Geg?YM((iOZoZ^&E8gUB7pQ^{w^_rQ(Q18)9Q$SqDM zy#P-Fm4tvFr#UeVydhJGLgEK_=XwFh@-Mit>;P`(WBiQ&-C1sffBY=&4;&VVX)obD z^}&h~!xb^))?e18ylJgUINpgp)VL&^J2fPU# zAisM&aOgzXouJIV3Av@y@b~dz!d*f-aSN=N@uU@`T2g2739^yglX3@cIvc59s1a&5 zG8Wl_TtNPXHTnzk9ZtkAU|oI#WBm?(d{P=$B%$JYZO-ptf#3!e;k0Dfoq5Rh@F5Pg53{m`c}v? 
z|A;w``4j34F~}@G1UvODsQ+|>xjr6vq)#BxYc_a+4ngW$UYrlInbtrpXhkd>y&jzv zO$XlC-@sw644((G@BiIkgTaBpd>|NigYkbDYz)kRj;9Mc&AWWrzA=z2+TXX{`xyKU zYv3IH3ud&*6@az8jq6V*0=w^OIBz98?T!GX*IsZWIG))H?N{yl?JMku>~cHCQD$Fc z7uf^08rxI&m~CP^3&=~0Y{zYx_78Rj{D~CD3`e75y_4-a39$K5@Md-Q)OqH6P2g7P z?oSDL1OEhvh2p|*!Yd<*(SM_TVwJHGuuu1nGsNA3b601K7iM=hka|brjrci)UxZe~ zC&UpXD`_#@f2M-J{}1Xts*~CW?lq~jm9+b`T3UccqVwrJ=)37PpdL>FWj&r@r~jf~ zrf;P$rca>f(oM9hv>7x3?Gv&HX#uNy6IDgMLtRGgL?u(p;QZa0;w1kIyM80n`lu3Bj(TErH; zd5$@1Qk!h12Gb9d0o<^EL#?CQJi?;3I% z9F_-zH$w6-7P3RH$L2Ks5w{q%9=!sy09fA8S-@{6;E7kk1sfzCh6=_n%2c?qrX%aY z<K6HG+MZ{V%(O9c4G?jODE2oaB7qXgN-fit~cAlhcRe zXP;rWVt-)uV!dW&F%NRdAM1)KVkIG!M%MS@S@Y98t=neu%*DD zz6L*XfjO0pL*ZONK<2?)pu^U~$ae+TTTgH^QQ=*%VO8*bPp})X?SY$Y#@vPK{B!Wv zOaR}r7CdG#NJX^(O*Jd-8RT+aY}y1#mbYW`;T*?^T>{q9{m8b+W=NG4M=roU`eA5e zXb@By&jj0pqiji_1`@50fVck~1Z@uUl)7)g3G1W#th+0y1P4JC{HLfI2p|^|wLqHr_ z6aF3H#`-sHk5i*&V#>jRuEReioF`r-y(E`Yywnt0J358&H{%y0fq9NOk=2Qv0AqTW zyE49ILRrG7#G=G0yaav&|D-@5TqL|EEET$h*d$^SJt;XUJBge0P1s-fP#^+*Ylzp4 zw=(g5f;&DVzJQy<$z_jVond-_wX_SAfE946+fCKLt$8<`#cklxtpTs+c_4Q;;n#u} zWH)Xsj)vQT)nJxk+CkXO9iV0BqA}=usAjM#PlKE;6DS`dO%viSLFVH^xKpiz`r=M- zL{A4T#4zv)-bZ~yCBt4G1d9Ja*x3cxBT%n=3p~|}*az_03%T5bpl*2yIN6us=Wf_+ z=pEQ$O(0?}qt`;8pg+{YmZMTofw*5#Vb(Rxg8K3&sD*_ggV!6G0owfqkOurV)GkyA z{hEhRJ6Ytv;M0S;_#SY*vfUfOQ`HY@%w_fr`zlb(mBK7mnb(*rL3Mk@*w%Q~@LQj( z|EN2n>!f?8Z3CLnLmHlDs~QU${vD0d#=8yU8d^6X4gUg9qgM5gYNM*1%BFm!yrev? z+^t-y?53nCxyl2|*{Y%Sf(CCx3-xc!2;CsPOn=1SHWr)zv5vQ=ID12P?U#Q)1ij9T zXrlX@CZalF=(zj%MB-fFpiro}kZhk%KMb1CkL+sBQSJy(g^o^4=DmO(PPJfS(ynB& z=(uP`%8}ILX>{;NxWz%qFKGyP55J|N^q%QDX)kG!WWPiz884oZc0LuK+9O3MDo=VK zye^o__b2|F@FO0H&*o-vhO)1*8kj#A`{_GqUl9S)k2;HTkDN;00F;;_!Y~4z@DR_y zkH`IoO~($w^hHlYRYFQa2Bai(X)?zOW9?(#qB|k$pghWfdAK2VF?K9A2lD3+0srKB zQ(GAKIgm>{7`+Vr7y2F=1$Q(%`1sZYwqf8UrroZ_xAQqZY;8hK^lMY%VB+ z%frc_41NwynXmp@Uy5&~_m1bI+W|MM2cVUFVn1tpV~tqifjAU1W&y$Vl>VhIrd_0M z&}4wCXlr9xLt;bQ`thp&l#7(9%EyY{3Qyg|I&R%8xwiIw?Z?_PwcTocHH|gzYr5C0 zs;-lLkws*7P}Z2LY*m|NqUs&h=o)dYR(?V8Re4l(wZ3B`T60M|NWaclU@#XasTN2_7O;GbvS677fMd%A#{xaz*ju6MiKU_+CC)cqi$YC^xlV zT3_)-$s6hO^vxM-GmEoYg4*LK|8)T@g*n?GPwvR59a?+W*$RC??9V?jHI@vGy{#tBVr@$ z?mz(q~u1I0>|TX$Oq2Rqk&d|oWR_`t3V(?3*v*8KvlpP7!)Kz z+O{$L7V;9}Vk2UUWBZ^o_^|0dcnTMSk9ZO05!}<=;Hv$AU5jgtFT?jIoQ8aW9|Umt z5swp_kv_rs^(46hGSGG8kL2s*YviZoCs6MhN~Qq0s5j{h{M&jH-s3ayQ*f)XM=&qZ zwJ3EQDTK9 zY6TiyUwc`{<#^5oXgV)Ff;vek1WTNk^`vQs{C9m(`s8fU`HgN8(Z5 zN9d)eBx6#}q&m_DNWMxZW`r{fv)kpK&U>1Fx7n@cH(G3NncC{VRtH z%<06gWu9T&qF<&-kZBYhNe$lpNBB(KXXwyw20h!(*k~B_UEzJ9n&5qKy>APCfhyMu ze?Isd`uIBg5dX-)z~Bw2HqpV~It_N*TcMBP-q91Wg-tqez0ZWtV%W7Q=mQust`nX~ z*a=;b<1n`u6IYY2k}JUFSb}t+wWm>O3AF9BmUI(+4+9S@_AKUAhK)W7yt-wyC$t^3 zT-rILhU%jbDM{q+q#clM-;J;UG`$5_7RCt~E@vRe8#?K0z+EU-mo?0<4=Gx+jJ?BaAh z0#m~mV=Ylxn0C0o z30q0O$N@?vGK{gF^@$A}=lGe4efe_)%Y|!_XQW2Oo1`n!O&Nr2bnfuHx%sH(q?U!P z+O}@ddQxk5>lFpt3KkVSF34&lXp>X$wAHAVIW3~i?&TlI`;jZijRBA8XhueQljM_l zRoXhxTbxO`FG@|GCamVK=7kex#8bIxoYCxNtP_kz+F;~9rIG|s6(7SEV-}#*pdf7? 
zvw~ZEerRp*J5)Diz#jSH*8vk@qrVp@b%%mtH`~9^pA-lNt^_fm-oOX>7g#0#h1!I3 zB15CyA^ChyQySzz|BRzS61)WS9oVwR2zQD1NGty5-+oPbOU2Pz(YcHlj2z}v<`AZk z$zYCPerL91jc2uD{bc4d=QA$Tn`nn=y`ffo4e1QMY&=y!;gMUAwt;7|KVd4qCoTaT zIiFEG;#xKtqfa7R!_7nQ0$u#iz4@NYF23`cy^ZaOg>T+q#2B9GHfeilNb2f_Yp}*g z6i@1|%iq+N)hMfDvSG4!RS8u+DrZ(aD9zoN@-&0!4gu*rQ&(TLyMD(e-`a8 zI#~3u=vL8;qUJ?^7U7CF7H=)?UEH(ySaEIf=i)=fPl~6Pj3{ka*1r61#f7R{)$429 z*Y#80srNNnG%DRV;}A;^dyTWb=drJI@N~E=+B1%eX~1O?LnJ$;KW#0ef_a8j%8qi^ zB>us_FKClANHjIII?W~?B+Z7XeUTU~%np?ixv&4l?w^AOPeCFg9*>Xz9dLm}lz)~6j$6{aqRoj{aKOY#Z!@@0u# z5-{-?u9p5_WCgiU zJ#hwX0W$CddP2s}2&k*DAay4`To{%_wnOcrKGG8IvRz_jv0Y7z;wFQ?c`5ogdKG3d zb^~rL{wTpj>`xv=@l&=?3!q;7o3@0W!U!^cGbgdGvy`lK_IP%GHk0jRVc2$H_|(9g zVPmXjyrehMI@2nlZuJc6$9a_Nz<_B%k`U7geesiVgRw0!Ez#Xkt>c2R>(NmWbog4Z zQ{bm>EYS6)yGk4x_BB=+cz#L@Q}kS2NMizp&Y*^f%BsXEQQ-FZ2+Ew@HI8a-bsJe> zRgbDKl?jzwD_U1r%g2?UC}Wf|i!O4ap-9qN(V4E;c(&ivbU!b$Zs z^EC@}0I%$aSOMxWrVqZJIFQmFxkWS5M=(9C`=GcTn3&Bs3FaofNv5UznOd7RTOvw# zWgN@;ntd*3X70c|QU1kzSN^7EW1Hh!NLy4lzu5e8^UclW&9wQc`493k@;JHwWN*lN znrX^-mOfYdw}d1aEXJihNNE(!6CF>MB%K!=mH=hrEUGV>4eE*jwhmVaR1GH4Npz5!$UJICWGJl-y@5Ur z-0uCDADG=(A6OmP$Jn3P73^Q^JM5M0HV_Hbk$v01$frypz`8e@b-?3V8fq-e+AP0*S!MI4c8!N(0QU(KlAE5pe~R_&@ZR-CMuRuL*6 zU;d?RXW6Q^Hr#FTl*AB^AuLIXCD(~$DZI2L;_H%9X^V`HnHkw}Ig@j4 zbG+F_STMUXXYJ78U9_Iy1$gG5C9Yrot6Dhw)A3@!fh~JDYzzjie0`|p@ zrf0EcvFFjW=(Y$mQXl>WEQ!MKnef%{3+NJaA`_wJj*ZR(E>X+a=2&^G8zgCuh^ven zkFukVptCXeFa=mS_8M*jehi_2m_Ra+F2dT727QMHS8NNd5{t%EYSb8fdLt;qzG+Ldn;#GF0Wiv zDXsib@veejxxR8|<=D!Fm9ffuRgo%RRjTZmPw2R`i5{I-)hB?EP;mw$t zNy=KBbuP=6H8Xo{wlXV{xiqsqWONj#|C08VBGM6(mube7=b|jp!en~VJponV=db0b z@Qx?kjvpESgL|LzH`~g(%Bp1!WZa>>MFt^jsc6b&5}Nd#h$Sw^mxF$6Dds(-?Jb1# zz+y=A8x)%y&4@0Bbbzi=QFKd`5KD~}L$_Q2o!0eDzu~56hdY7|r2ztu7H!A82VQ0b z>L5=DL?Gz=pJwhq(k1e63W;i@K0|V8lW70YhSIYbKE_pMZ&o#{1A7QNi;ZCu*#+!5 z?CtDLK-Ckof3SwJUNX7NRSXOLPtaL$Xz56E>Hx}O$c8yfyhHemuYpegYp5-pL>+^y zmKCun(Se|d_6Oevj`-L4{`5}q40HEzFPPV%$0g4)kD8^MGAvwBPAS9cr5EweY3i-dTaH2xH){RG1V|?H`Ze0{p5q?Tjip{YqQ2<4n6&9yQ;)b<+#}RLbTxV-rYBSrYjL}v zSG@<)<~jg>DUo!WG@hJE!Bfkq^N?aB1K9Tn{Up6T+$HqP%Pc0l59}ZhK%+<6>^`?8W-4k8=oW~ve><4YX)K4@(X#sYs-7y9a<)>TSgGiv$WZ9(j?|5->sBYN^T;dZCOM}rxeis=D6f{6 z%Ad%O$q&iz$-l|J$luD#Uhl=@KiMzrkWBgnYN*heAh?!WN$CXLmL@Riq^;8#+^lvz(XU*p>+mluqXg51#Yu|f(V`;J#}sGk^R#GM zH?dlLQ-YW3C9fpq5{Kl51T9%9ZX?#ENz;;2JAixbVe_$)EACTV7V0C^8KUS-m_$eeynyQmD!Um3Kj9!z zOnO2ZNS2ewQC?FLs7s))?WZZ6#a*aN?^QE^AXQ*Kc{ zQEHUKA!F}WeSX7-hMSFb>UeDn-FSVO;go5T#bljm4?EN@mFJyrUtoG@NTfx~-E zqW^!Y9u4}Us!#|zmcIf+;GOXUo7Lw%=p}iVdMxg(pf5QNTA{PfJm*6o8l1N$+0WVN zwk_5e@b;tTY3Bb-1*X@=_QuNwGPu@~V2`5dHfWomhMlarqwcTvHC_Uzv9;lL!-j?- zuxpv?zt;a-f3AL4{o?wOK#~?g&Z1MLP!*}Zs=ldysy?V5sIIH-s@|!7t7=qc6{$YE zeqjCT`v2bbMdeLee+G4 zq(R@y&OT?ay-+lJX7;sgHhX?fv)qy<*7Vh!VX1DrXfNqp?IJyAyi@(v0@=X>;i=HG z{Y?yH{oHAyJ(VU13j2sM#Z9GMWliM+6sgKzsypf_n&#TdI)|>EeyhHOfic`LrX=-F zdY3dU*#m##aw(HjOeu}?jm$SMUuwSWe4%_<`QGOHkR97@p^is@KoKQSeJj3GywyHUb?~1PqmqMj1s?4ucE0dJvl$Dgp;8u9x zTxV81P;3Mrs=cC^B2&Ie-cl|G&uyu!tn9gTq*MqVMQ!++b{A)g#)(4oTsY6ohC|~f zAm8gLj+{XT{@a2K1M6ir$FXzacfFP=4JPS7iI4Gr3`^}?Arow`O%(V?j|_D2-h4Z3AJy?KG2?S>uEDt$HVz$ ztcA3kG50Y0OlwWmP2Sw=xpQ+H;qT@af2hXgF3Z(Uuv`&fvE5PlzC?K`w-l4%3;UOXATIJvgLe;2yh1 zE(GTx3O_)fa;hqV#3H-8yXGF|hizJqw!Utm?zYaQOVT&dPt+gQf7H{4#$f61Gh8>k zHT*GT8NM2x7}CJl-)NYD?P*m5tG|m|Lp6O!_W->8_Bw^`v39IBp;@YtYxb#YsMA%G zRryu#z^M=-?A*3Q^v`X;V|4#-VPmM zq3sVwqt{m^YY9nXj3en||ib&n=RBE2mpdJUcCWe0H(y z?^(OEMrJk4%9j<(%s^*)mH7ny=vL<4%;%XO(Ur`|eJPREA!|X_%d9r&NZ)eq=AJO^ zG_SWTwvMxPv$t_{aSnBjbr11$^w#s0_NU~fz?HIhXgyT@OtcKJ%92bTv;^b0vb>2u zNc1O5!q@SMU8YMS4grP>m@r@B#k 
z0^9YVp_}onku$bTnv--a=}l57sZes$T^}8#yQuA>jcC?s3Tm$5R-#eAgF0lgs++31N``aqHJCfc zpj}v{T#nnz1WaCKl(f=?K9j9*D>!&$R|H3|0T|Lnz@sgIjc#dW1ypBMyr+z^uo6oj zWpVH|$0;|0<8=`X&11@K%B9Mg$|-m*Qm#-g!E61Y>8h?Qgs7OVi*8ER~!^s>B;Ge16iyXt!`4w)lGlB?QN*{75(m z5Ce&qd~M`ms=1l$5BQZYh5kA}T=#EBmC=FF00%;KL$ia|0(oH5O+iYD5AK%JJlEV| zS6$aa=L?6?G1~sbR@}DOnhniQnmNCDp~;fl6)qdqat>xEV?wWz^(eD@rXyorM$L?z zzq|kT#59)q=iHx#xL?=#lmCz8kKm8|Ptu>_e`?`9i~k(`bLY?RKWf~d5B+8S&dVs1 z8Ot=a^K|r$r;H-Q~IZ@OgWfxBIQWRj+Eso6H_{+ z6i;y{-$G5bO7-*8Y@bGKSI-MplLoNC?mK? z)u!%)_jMaQul>B7pUc_Up6p|$22$LOiKWO5=nCJs!qKIX+;Fq--jFZYE|?ln1}5Y^ z^A`k%=O;fb=+HBUT16PK}Xy^)c)31+jhjNwXU%UEDOw0(;O3%yC9d#S)Ef5 z=wJVALH4z*i4b*v&0Lk)CNqDgKjTrxhKykuEi)=*Bxi^-gc-VwvKfstx@L^e*pzWA zBRfN$*)Vf<<~`hwhGqTCnv2P+UT!l}FY{E(2J2DVY5PIPD(4tiD|bQg!_vIdk)lXK z*K{RtGdmA0e%@Ec+gbvkLwOKb5FMvgAW*85q1qI+wmIS|aX(Dfgpv zp3Epe26p{rFuv@{9jeCaO!a(CLG3MVJKalNBmHH4Im2$$QhnoW<5i=}STw0i(vqYr zNtsEpBrYkOx`X@3ga`wLPH&c2l!fNJ+He8=ls&T zTy3g$mbSgNu-316p*g6T0!Cxq!*?0rOlwkPL=eN=%C3t0H5s2NZ>sv zY9dNUruqx8h|Yo$zLzpm8%Ua52={@R&}q!%;_O0}fY*{c(FeI7h2l$N-sq6%=SZ8# zqj1CUg^&)cus4CKfnCT#=;%M@i+j6zPkA^f)IPY{x*j`g12tAV=Gz^%UbZLJTGmUJ zI+myAuI4P$bd%h4BDYCyCT=q_Pc+^JJDPvLnT z_i9(BHmfP_)Yr1YS&g%|W_#eX`8sDnuHNJ}{WX8Le6#+wneDlbH_rX85$>8EsrQF> z7nE1hysvqu0#k$aL%J{(;Ub>si`bTUmjsvC!Q^9)unplqzK`!hD9A75VQQqHxG)0~ z@gR|2d%ZxC zK^Pdv$hC+=zl%AG020-9lX* zg){4ttQA??vkqmQ$V$sf&!V!MXD`pb54Cc$oU^!T2Xh~oj+*ydj#&5F7TCKx$~h_5 z7uOzlZ;!$I(Yx4J$bZ8>2+D=Lz~|tD(B<%n$ckv2SRl4NUL)}ZjMM_iz+Mh55CfIv zD6$Cki`ph=B@7A=&`p4?{43_gdn7fG!Z1x%RQ^dmPho(9ru{^yYItpk8;Tff7#ks7uY<9xvAeN{v5&DoUN$$DGb)XCa1|FK+f8ct zqCW_K*ed8oUv+1(2kfP*q$70iwL4KuC6MBAL^DiNQNtlY@sxUzdJq_|CE(TgLma4#jjuUqvf;aFkaV6%qVbo{~?K*O5oSik>BFj18nu`b~NlH|-VDanjaO zgY=_hxuhnTvU9{T@m^77(IdJu{Yy9s&IGFj`30B2Vf#Z40gtK`agT4vKjhkQU)d3C z9+Fz}5`z;j;5>U2O0k(>EH;W9fd*l8=yR|`@HHHM@8wm_I{!(Ad}qmk&% z*a)0&6B3z;!Eiu2$aaC!`UAg{=t#QBg_Iil@>XE~&xYFbu&64!Q)?s}ER|M4-p3Nm z)MphHaQjf;r{`9URR2}?*JNqNYQ5UAx-YtF$YlJe*FtqY&aldG*l@$}%J2m#J(&g@ zPQWOhzYW*Wk=h%ysHI){0s6A~gf3lo07)=4bt3GQPiiM>YiW7SYt1I4r0U>4enq_r zyMoS0hAFPrsyUTS^-*D&a_az9 zn*^%i^U^hNpzjMzLL>c!#FPe-cjDeqP%IG@7rjJRDn_RZCxb7&9(w%qNM-m!4#WL= zC6Pj$0xSA6k}>4mKDH#A3M^n3yb(snzXAtHix!G*LTbz^)YNRG!wd+f2O0%V=Be@) z`15>YeOcZ<-d~<^9@4YdUBms>H3Ew5|JT)dM+3(@`)Iq$e%m(2R>+oVJ!73^{l}VO z^;_Op_FDQ|3RrT?SIle71I?9zHho6==1S9GQ++TeDN`udmut)Ynfo&L4iJO8xzBPx zs5Ge=7Sw?IlRH*9{p~O%q!6&i#kTIFjp zF2i2KLBl1(JA=g#GsuhujQNd>;VBgUEevAATWqKYV)GQi6u9ZXvo2SAMmtqoM;p`J z#C^Mp#sO4tIQo(kb#Yd;TeS%F(-h}g0jM8oRSbGLKmHYAAki5((|#+R=tn-BcDc$N zoOmCVZYb1%9XfFwZ%-wC|*FfF##xJGes3eG8j@X2>xxB zU^3XfTj9QOkL*Qyh{c4KILv;tsN9*^^8Pvpf-qPl8f;K}?D~sjAWYk;aj$ z(3&3)6$)(%a)IfAP~L((I&ULfh41^uKsWx>JHe~FnlA zhR^U-$6Ci=M@5GV9jw3IVEh#mJUG3 z8FQBTzWIQ8nR%vpDxOQto6QGNO=ryK&6kkthq6NV!bic$?~cTn!|`M&aegOSFn5`*EX!Wty6~C& zNa!-gkv4K9<)r2cq|mI8^uO@`X)Agz>L|_<&zBShi*YdA8z#!_@?n_E$0D(63p^uk zsk>?Hn(11bwj*XfgMKVn4cU4HaHjT#q412@W4L4ZX>b`l=wD9^hYV8<4GanWE&Y6b zYrS6oS9eM`N7q$XM(5L}p~C8-!Y*sZYbt2m>a+jdq>Py6J|IzeDLPIkRM`J30-r2CO|2vU0Fl*#J+rrqZJDvG+oadmafj1EK$li|>n< zAwx(DOkgf_eP8KSbS-E-)(Gnet?;9$BzOvLf0*1!*1~Bx3(f|Q`C)tvoY4Z{Y*c6O z!qwn=Vss)HUjk19coD&gVq}y-f?NsYIn;qiWSh`8_!WwR#{$g*7Wi|v&SU&(P-T_z zTYX27mm&2%gm*zhZ=UCvr?wo;xl%wmBv`+B>Q^3=X&bj(siW#=3R^(1%sFKDJ7>nDrjKW2RY$AkVK1`e0@2 zIO_vzS=&|HFndwQ2geZSS7$@l9+%72)P34r+4IcP%lpmS5!#RX`1jS$`;&Jvuprnl zlpbmj-WE26t3{SYT#?z)60sk#W$~26{zNjfjWMu$z@9n46@;!+2^QsjLPCxpGsr1m z(A*V_fR17>oev%aQPEU!T)Yu#&bQJDGD^M*2|)7{F~xi(fm2GPUZ}QUL$gft42c~L zw6nE$w1lp)ZVK|zF6eIIaUHYX_5WU;*B#ca$KUk8Mzoa9pd)c}f1=&39f!{oX6nOqRurrYJFtN*xOsQMY*!vww;uJAf|nv?1X#Bd7`R`N 
z0RB-JSX5(Wcg%)cfQ7t7-}5UI=z70Vsk^WV>;ZgS4$W}3;uU-~rs3{YRKdWxhq;al(@xdB`*I|f_ml}r)lCb&$Y_?~z@cr*?{ zKGD=@I5H#R4^P3(ehtz?9t1}RlQAVO3RDYZ-a9iMya>O+cI-(N5+q0Z^oEx3}on@Ut#~sIf zoVbkru6>TZfn8$%ZA-OHwl%U9v1x6zEoe1av#bs)Wh-y{*Y?%c*PdhV;W*{Ua>$$& zoXwoQoC}?=QGNGZ4c#}~O+9AMVeepHivOd3Q(lX}mq4B1qTst=4LE|-2)_u=jdX+u zPB?Zl-VS*@tC?hWH=D%mLCtOFS$+i4c-oPlkt|yS{#h4=E$A0C{MbYt#K-^Bn5IiQ z!5gQZ?6Rynd^5_xr6G)Tm^;cMs5>)MEAx@S6H_+>n)wcL{~p={+V9$kmeQ$EUG-6e zb#)Dq$Ttp1LtiM88sbitjEcLBjq4V4xW-zEHe2&VvrE$*`nAXERqEF0aK}`AR7TZn zQ#;mfyg1H%wk%Zj^`M^zvGETb7FRZznuL&I2uf3KxhUP(>2*!ob!C z$|oXOZV`S@)}Wg$g358ee3HBu@b_XssKQ{TA^`=Tw-x%gzOs6#G&h_hjzNz;Oxj#p z9B8Lk@)=!jk))fX3RI6)xJWD!_kd=B6TJtrK3~)yYDc5UO+TS`(_@gKk|#VT9EOCm zH-e>tMuIqe5=Q}*cuvlQlH~<41yb;NL$rTpOF5o1wX(`XN8^61oR9!7orV4FztY!p!+4?_Az;WYadw z%b(}>zw)2*uY%gVk>3a{<450p-$ma(-z;BCp9;6W>)xf_241)4lxK{mjOT}Ysk;_1 z*T=3+m@HZ$1)!8m0a+1h8v>6jsNj((v8(5vZFcIAJGon)F6Sd zcz91(9@&V@wY$+)v5&Di@ve#1OdaH@y#bfF10hC&-B_fj{SnN=xx0oghh(|c@YK35 z87`H=u|*99x=%h?k)f!JP2XK*Vbv1!C>5}(qw1i#re-+qd2cllcy1}Qg|yAI!?gcu zN8vGDyBLr8+7;SE+UwezK)v>8*J`I|yWpIyt!=2Ss3o*7G`lsEH7zw7O_urrCeBsr z{^|;9QvF4>7im&P%#@FCijGs(RZ4+holz`O3_=oHEk!{^T>ehJ1N%m~JRQ5`ZL$^6 zWDkU%wzNzr%aT3>cjd5jmvpuCUuk<_qRBW%gP4I{K}-D{y50wpGtfq?K-S)PNpGZy zRYTgI7#SvSkj=LhI+^z3D$vbn#Zl1*(J9e7sA+nMiik4kvrvRjMfxF4zZISou7(?7 zNvP@`3sT`MG*2)@&>T*Le(DxH8C4*XQ7$bqVfxYlD zs001Y!MuL(u88^f_-S}c{_(E#cJwBBf1@rIdOCTeo=ff?ZXu?QYpw@4oBgi1i*^@x zH+Q!~B6&l1XLo<#xL;iDft%ED-gNYEizV0{W{dvf z40tDc3`dEJ=yx08wK5nQ;4&h)$P1P46?zMDQhPz~S%{87>2(tck8_2+q3ah4zX*;A zWH!Q0vx)_gp>bc4cnA zE64TOb<4HGH4{0wtzAW2cIQ=`)pMNVoPC^aoDG~+oQ1G0&v4vz9CPf06Z~_B#SwGF zkw=y5_=Vfx6U;Ve@cxUAoA|pw4icZU-6=z#RKxSk>F(=!Mi;w zhDAm?X;{*=BtdfB$E5pQO`Cvy+-6MU5v>jc&sVgWj-7&+10# zK5Dyb9h$Y8a+)`&yt3-Qs!gb@DDGmbFdH<(DH{im;Sz3QM-}@Os}vLAv@{%yg<;@p zwE}OUr=kOVvjpJdzlG<(LU}jLo>k;UAbE|)Q{3~` zNv6TSWFkDETS|(;_s0SS*;;VL3ZVo37Tpo;M1E--ASE1h=XY@b8x4N}3H=?ucx%C) z7$@v5tSu}c6bUU*qs>6BWf4rfAK^354b>PWACU{l#@K(`iL1m|A_=+dYxt(Tk2?vK zX+3zZonpJQGWHoWkEw~Yh96)#_fBx}^YP(vef&mjU`&fCWmU9FG(9p6{J`hoaYz!l z7-|>#j-+xEJi2J?6)NVvf@cH;#$b8hbMGi9j@Nn$d!D!_xYh0}uF0sfg03&lMb4I} ztrAX=Gt2P?k9SB`UgsF@XzQqrj+bYDV?S@-W}jqlZ7*w2vCHiq+Y{S9+Zx17p6dDfkud zrAOceTwUtMU2p~*sKk2o z23O%!o*wuCHSBxrw%#I-&+47w4S4?bgx$N`P2FbKB4}eTI@>#)j;)x03pq%n8~(6A zuphNA1skXVPT5k>&6cq1?2OH9duuyyTW=e0YYn7^M=o5p^^tWIrsBd@m6fr4uxtVL z(iBKdmU#;{WR=WDbHd~@#Y~0F_06q-7w$JF%pEQBEUA`c>wD{Y+Yb9t2kAQJp5WE{ zPe3iF3V)Amj&(?sXDjiw$P~dx;WUv-vRg{ZyD82n3qT`n(W>=MX?;) z&U1=8iYJOU@Kmy5R?fig{0MO3?%;)af#{t;XKjP~<9m35_QP5JUb+zu4HD@kNq6w$ zAHa8|Jx*(n=#A(&?tksk{o-^wYHk(gj~1voD^fEb!Mp7`ycjk@S>Fu(?+si**TOY? 
zBK$V0B4PeGd=~zaYsn5|VKPL#CiW782p!bF{dggNhg-e}Ee;=Jb=<0yeU-97tG`z-rlbhxVa!uBM)9=Ko; zATj;z3+(&sH|?M87Q54Kvj4Gvx4*KVvCqZsBgr1J{joi=?YE7v<+r`IF19wbx{zg9 z-@==}n@^jEn+us;rjMrkrVFOCc$sTbA|Gd^`LDT#Wu1k#rdva{4vti((!I^|kB|4C z4s;IXi|Awd5;E4ty&*18BZVfqhuA4OCF`hoqAaD}sR?L#-F1B*RAHuZO436xf2tuB z@1JCI(o`^59mXSI`aCrZ12=w=-lFS_#M8Rk-5MX}2a{?Zb_ds#)4-vqrc@|%(Y@Zm z>CmLeQ>f9=CIBNjjQz)6Ocsa0)j1AF%X`Z2z=l5qr&|N}(fUe>@~h%Brj2g!>@mYt zZKHe)T$&2Vg_vb+vR`n2a>+FE>hkue#Dzfc(&5fm0_XQR)OUH{fT_Tqi>q3z{#ET$ zy;eC@F%_r6S_ZRrhU%_rn`#htun+K^WZ>NPhx+oSd_4MKnyi`ZlXSdPCOt3d4==@B z@o9LQ7J^RysAv>)`XZ49^Y32x3ipG@PeUMoHR0=37S3%U;a}lX)Zq%@zrq2)n=8Q| z{S|yayTQHf8eA63QrYAdvI*&jkHkOl_S(XC;|oIJeU_V!Oe!O1VsEm0;q0{rYLZ3l zban_dB?frbeuYclV_>z{m?PMwHDLtI^TaabqkH2g;QjLjI>lpAb#zYTBYbSfg&qdg z!RCQMc?t@RMuMf5W}jytZtq|(ZV%ghw!-!S_T}~+ z_Dh%_L=KIEwR`MgyV#+1=rN5H!z^O5U$C!1T3#Ew!u|!Q%~WLO9kAB6`Ybmrli^bQ z%DfIaV};>0aLKgIw92&3w9j-AuO9$I_Xg^4qWP9tVp(q)Vx47sXs_e^;o9oy>no9G z3M>n;;aTts(KD5}#zYzFuV5Uwk1Zs7rGI59iXqC^sy3Qa+KajY`d|7X2DhO**dH_T zm}eYeOflZTeykCk)bHuLK>VCsu01H-35U5mU{$x2Eyv#dEAYui-~zpq7lUV~SA7&U zIi-HA&QP1wFV!2>P1RYd2`WGC0$SxI+&M$?Bj|(~vi`CU(vINj{wonkPKifgI?;(u zm`4_fdWdQwtyzNmUz84lBPxVDQ5if^L=>D_-_S?sS*XeG@E;lpo?1t^N&FRVLPrc@ zHt7g-CJjC#WvI{OLbyW;ks-2!Xh}HXW!)F4^AdPNa@b!Q@sQwF_>sH8UF2@#Pc!G^ z^5D((i@OfSWEXgoyWq_`gRKHbs=17YISo$kw|JZQ?%21eG1>!OH0jusEDy%v-1-99 z*)#p`eT7hmUvL(e0!}#Ft#dyFLS5J8cRm0>fh_TgHt=um}RV+ z)HI1mI)QCJ1wMZr!%zKg{b0RRzf$)bI81-dDYa335t}hubr4&vqUeMsASfFZX*h+; zE9>EQ*A2aJF}mS3Y{yEg>Z_{Z)HbR@V4}^!dE6A6Fc(}y+v0w=U)~t|i3jkJ7$vKK zyqNdcP@DwE`=wMO>mxf37H%E+7I6EA;;bDA4%GzgYFnuHt3AMnreUWN(bUoo#0{aN zwlutPE^CJ1o^V244_@j8kSBHtXihsQkJYl}QeHA!;zHLmi)M%nqF3}jdM&+zo{h?C zjDFV!d!aq>)s$cwSt)uT@`#j}r|XFOV;i|1xJ_N~OkSZ|E(Ts$UX%+LrHb??I9iqz zzD7Pl2SG`=V6CK-$aZgn^Y|`tm^ei|ga`Cb;>Ul_|G$cbCbDX^O+(8ZJ_4M1?r zL@tqu#FatBUpQWVfeS|pcb%=lZpO4yI59ka3<;HWqn9H8z#pXp91cGRs{}6w>IJUm zm4j>LF!GQsOu$su%>{|8-u6!YG+?; zP9#pZ;|C_(6^=Fz6?827?fr09G}sH;ivq*`YrAecWZPg{V4DjJ=A!M2Z9CkPM%eno z&z-ks;@)V6ThbcqNHEQ3SqE8*AU*d!GKl-4Yf3G(EGsP`%O&e8o6wKK!LH$qt257zv#_xvX zhO5}INx?EXYUpGzLubA~cM3n}G4R4#uda)o#u0Fejziz$QQUxAX;VdU_~Eq%kMWj5 ziCJW?@+meIzfgC}vFWIdM?LH-WXf-fJ#b2N;hra!A4i=PL*=Fax1X2^2h7&MT}n#J zgNwZwTbe59gq`Fu`CdgSoa!3Y93VZ_pd^uK)&nP>uYHFNjb10veF1O1GaM1#YUYA5 z^%T4WN)hU7sEQF6Lh_{l(lsH3l0UuoyDytoEchUkWJOHF{0k$P|ge>&i^lgA^v}-@{U+SR40D&bNJ%? 
zFYFS!U~1maHfM90Iml*DO>|C#;hj4IyA>%CJi8+aU~RNq^kJkwTsMD&Q{i-43OB{+ zaNL}Y9M35ME^jS-Z%@KC)dJ^n-n$)6^Ev2}neKr=pKrUyy5v9ww_;u%i5^(kscnLkWYb)y%YpONFTFd6P=^fLZ?_CAp?56Oa z$txFJ8u}9+9!-wtCLXa@_)FwwK~wsZXr#m`ttKyw^X{WErYf!xYmaD4V&m38KU?1i zs`kUWjyf8*!nN9epqkvIX$$su4j8&_oO~&&4Ae*;?93YBR6eY{3RL?wZhueV(zzNw z=1-L@eC#`7E7BS_$p@HY0*bYYl8P7d#quuD8i?fIWk+PgWd(u4Y>@VVR;nO;b^A!q zNaf&CUXq1mjnNJ5@;0dU`uNJdRo_)@plBjB4K;%>pHJ0{1v92R_I{t$C)KmmEs;xf z32cNkbm`3s7I(oMDE5@HYtn&8CORqUAPI?2i69`@U7qldm0W# zZ`iKvTigrpBzh*K;90GRw@0dJEcPO{H8wdmGBzmICRQ4Vc5d`~bV;;(v=;a>vBnr?N8W+SMY<)}LUS&?yD)I?53Y0Ap6oy^SHNd~avpGY zb`s9Fj&qKsj;7cJrrXcqE?UDLu>G+8w0*a|u-(SvrtPfl1W+Hby|4Y6y@5mRtnFIk zc6z$|-ue3mEWxp1S7dHXnpn^1xI6qNa)O`&{Y=zR@=jV+K1k66Og*z|4>+IiHSM%V zwXe15+E37tyR?tMuXAX2fL&J=bvPehl0PsBq^cHxCr79PN+Gleyi$USJC1pQ1WSrl zh2hh)QPma*PEl1EoRl6^Z8haNcz7QI8n{Q63{Syn(neChWT&JZaA!S~;yyS#hsA|} z{vDL?@B?C`OQ5{lEKdRJj>Kg029>=GIJi;WOT7}DuUEc%OKxxE&Dfme5fRz)f+Ec)R$Zcpdn{ zouF4JFBZTt;y8L`Tg=RdaWkw6y`We4Qg|AZ@}&PT8W)_yA7JPH0oepqkghddxLx=V zeuXN!0K5w;(3OC6Gr~`pX{Lg8%-|!Z3JQVUFbT?s8)PH$Cea*7&Q2s?y17)aXxhNB z@F#nly~f^Q@8R))eafO=u*a}{-HN$pE<29x13ycKImR@DkL~tEjl}JE9pF1HkZ@Bh zIyQ0%iD@&y;Tjgq2n-6C^Cn}nG6yWK>EPM?_1^XF_fGS+^H%g~uzfz|nS}GZF}83z zj{ucQd))39?lbOV?!)fY?g8#fK)D^RN3M-XMjn9YG(0A_rn{E9<|CK7j7#H6#*Vs( ztAMK__|M&4&G5cruF}AX?9LZpI`CNS=Q=N-+V48=IXB@p&>VZ{nqZC+n(x`D;`+}3a zySgz>^KwvhlW;pt!`>ta58TGeTVNBMln+6@e8P6uCfzITC5=cHU=I?6-&{7*3C@c@ zi3>mvx(Mj>S7iI48T{s?!Bw{&xHE%r^K1dWdK=tN%K>{-C<$dI zX7S;`8b4yY+(xdJ|AA6wH4+HA$*TUh?Ry8U+Ck|UII0%IeV{73aTr>|XF$G3pkmWd zoxPDQL82y)iKdI{LG^cyUVyu#k#=F1@=ADBxCWboLo)^9FkmTbO#R6qC#eW*AeHaU~ummM6+3uEm>Tt{wya z{OD*_WNd_j*J6$EqfobyJGdQ52Ijy@_}#w;53FXM%YPDg%i>_>r1=*4y5ojPU=x$- zU4T7cviGNFzh|tcm8YCX4u-Vd?RERz+3rv7pZJq;yTAwhiI)g~aSMSJjrXkctnw`I ztoH2ntoQW7yi^$Pt%}P2;J)jA>b~dRj(uSbcOf^0o#A0jQIlN5kQ!UwmEzI@V`80R zRPi}iMfYv@bkA(>O&{wo0cOO2(9ZDF$ggNL);95(`IqzZ!^pqXXki9jTwGmJK>7!s zyB66PxeRF(9l#*&jI*+>dMUWhd(=> zX?1NfR1`lnZ!~9-Et9S8t^TeWtxAAmXXZ?yCETKfm(yZ0v>PVe}Hd)g(CF}@a-5-?&H`2 zj+FO=BfAutCO*vHV(1AS@ZHaZ&)#G3xUNH2_+0iGZ)adUQ5#GSD-5S?yFz3Ycv%MIZ> zsPJ=9k8d$wcfdyp;mUImXLebEK=6rLMO6mgxQlF$JdHI(O=JW=1=DE`|1Tb6F>SZt zYx3pzWIoRQ<=%0x@r3OX_m;Z@kI8;qe(n$WQJtaWxWlYqx-&ZFVPXW%^$YP%acgV| zG8xjN%cIS)oBkNtg}Y_Zh%J03JS40NKMXAfb4(Jt{U3+tU0^|=Xy9gE*Sru|5(E4y z{~g~%RA<2Zz`MfR+H3SiJQmM4&tuPh&nJ)9Bk-!d#k`Hdl^yOK6XUMLwfg zfr|Fhk3}cMD8P)ihjYJ_z@h(H;6`}YwDn}{|)@{H13i!v3F@f7eTh!Go0w{G1r{I zO_GMU@HDDEC8d6o=P^O|2MbY5ek67gBas^d0WDHsKIWat zTah;k{KjHn^E~ix_s<5Kr@z0ie<(5q7J}Qe8tDU*ks6`}inPl&!#4t(o%+C&q&|`F z|1RVTpWB=6edc}RedRspUF03=9p~K(P9h5~PT1>1trbDdtHH~-jJ&7&_}Cv_7u>4H z`VN6Bbjr8iw*~n!8+^}wmHa#WpZsxuw>)(qU$A&62);zWNI$q(zl?uPq%yr(8V=@f z`DesVvKn=p8X~AJRN^Pm4zAJt#ZsK9P6;ECK~?`s+6C1@;gmciC$Q<*0=Ie47o zO=ziOz|0>4JDLfWVSmg`}fMn3>*vLFYCgmGZr~k5{zQGH95Oy#bNIBRqoP&%gh42G%g+2@N3&-Flxfag% zjNqQ&s9-M=+DgIGbTCyO{4h0YGeBNPD&=*e119Po;DC0(xqS@v*93aVr`QTo+*Yy3oVkKm9rYQ08%RhK*1+5m6$-{PGWUpP@+YmQKD;NR$@+sqsJqy;O{s-vIvD6VKV9dm(e^a;R`<4m?g;1?MBzLKxg07>ES-R)M;KPJwlSOSlgd z2|NH_azoz5JRuTnl7mxlQ+OV14;9-fWUIUl9YYFsWla4IgN2b6&Pz^>;e^VPw43J^n)i6;_J(8w(Smz$xDqQ;m-KZzQM zuZhb@CV-<*7BlL4A@xfIKzW`>*P|P;9V{2fub_+8Mwb*QexZ&f@>@9dn_%L0;9u|&XwnnZ-v@k+4YC)wqn?!RhYw0C z@WFzjuSnZCCprPA_I+Td*AagecYsUEZt+gh6}lTZI`@&tuo&8;V$f-i#V+tA5>hk3 zidcrcN3Gxi{B}I#64c)y@(vk=d-rc-Z;ru4Er25T6n~2Qg?r)~PC=N62rwTRxI5o5 z<@kX&_GX_jRhW5+&)5Z$P?D`@o+DGRJ2M(vny&0R=$(8_duB!A3YguQu`2PjxH-Nu z{tjr+{@A_vU`EK*0SD~?XJanMZO~>`i`+o<-3+&i{EbYCHi?ai?@LU^PxdC$jhUQC zj^BU}!NW*a&O~+bnu%xdx_bwI+##`tvCHv^i6#kcd^3__TSptle=|XD6Tgt#%T!AciH(UX ziBIuKvE9+1QEB{X;y$|qO5dUc!ROj5 
zM(INyrE&!&g&f5Y)%d~OHm*CbBKi{sVk%tFXY=XA1ZoPBGlqa$-&I(go=EHAiP;OT zchi7|1jvqpx!AmI1P`mD&?|gRUlMH*H-mcilK2*Q3afxmZx{8WY2if7uS6sYv(Bqlb$}g2@wuUr#ep8hxB@ENTul;ca|;WnoWh zDXF7g2+q*!MFNovd6sSIDscZkP5;0xvV!0fc@jFzQlto}qcx~H$R8a^y~nnuIynbQ zC@XSe3zKWe=HyJm!*hIPC^i>>U#lfT#26&;T_^eyRS1$ef!vT7dz{&r*qqR^Yxuk5 zPU;;gBW6P(JRe)0+1z`^498Pld~l*H>*Cr1RbNa@;9GO+*?-su-~r8MJFvrOmlVt+mWH-RigExqIflC5L=lo3m?<6@$*Oo{21#Ne~d>nBt)Nxory!8 z$z8@y{v|Ul(J=l`tYEAWGG}|UFBv*Y4GJ07|l}Kjm;JX}ROL4FH zWn^(`JjtMcHe(wyX$gA*-IZOymEiSItb573aO18*6oBV{Q{p6_&2@)&NGa|CCnHXg zx2f$^7xE#0n_G$-{WM-k3_;hp23_e+>ZxG8umY`ymcc9PML(i85wG~U$U{iM{Q8XA zfeGOr85fKa%@^MgIfdN>(tl4Ht|P8t4x6QM#)rEUJiPlUCvja3!q( zAM81B!hVN)qgPlFUGfC7ONP*;=%2_|Z6IufOt~lER2&wr$Dd`9d@+kHL4` z1-9cS!5``@GAmY*$w(#jW2=)%-6l=MF=7hYSx|#MLl+T_BMb9AxfHmLWFx_}Ho2I$ zVw?Rh_k@@s7ziF^FX3mhJU@_)GJn|7{BxoiIh$C`Rb{rv55$`?!?<(kw&%G_WDbgP z4{gg2 zGTb_RF_IVilPC^{&m!14_2=F)?-D}fZM9{`Fx%o!qB+nRJc!LtoMSvljqJg!N8;{t z_ydms$CG7dCCst+(Z$g)vfsn;TL~31JO&`E%fr4#k6Vni=OtWCHUP}Q$ha9fn~%8w zhyO>wkJ6cGe0k~@(BUcMYOV?s#d)&^x{XX^qjhJ-BT*%Xw-QUp{NH0yh(y=av73Arx7)%8^QtN*0@!!my8z;6&4em zK*I6}N>0@yEjSTXd@AXt8L3n;RDMKKhrUO(MxAel9$P`DQZtFs{25+Gwh=ht8#_kg z7o8TC2VTBa_=PSh>O@}=h^Yje6iI5a;I^o{dgmG57AEG z5cH*ZqW;f}tw0q) z5|$sKfW>_OPp%7KwfF&WT>Eg;!JEuuG~eC`Rof7INtcCR!pgA>xa5W1sSUh>iSF zRJ#DT*a1WZLd#cW3nz}i)nO7+!Yd`#GIB1;9p!lDeOwr85Z&|N89kia#aH1Evu$w` zy~I}Lm+>9B3XCRxFiJ<;L_5dc#TzoE*=0adM{*PRvBYDjw1;u#M9o;iNCO~JMD%uS zWugVRW&4PY)Jow}y1YRye*f z(VVq#EeH>BozO$Ywu3j|M&pHca2j(hu_#_3z9>&o^~avAntQxkuCO?I7Tvr zg{#PXK?dH@*ghtMxFTFA%A);(=Ez#hCJoej(#^ML`^T3>E5^1n9m&RYQL$WnhF&Bf zFl@Gjvww9$hD75*P%hC#Z*r`lI=p*63+@vykn*lzSuPhn<|=BtBkpjAL__I8g1%$~ z2+DY30(q3YLCiN=vKppWQ=WSVpwT!UX=3SKUlK&>GjK?l`_SHrjDw%~#=joyrN zy)HJ`-@)Q(NzX)17>}LvLUKA8CQne!1bG4ly%oGrGx*`x#bQxO;SR89kC5NVV(6U= z)mG3HKl3Gma;VjRg#qMipTw*^99pa)_;b48GIfd!6GqZcMyZv+hOR*&c$)esFwg_( zfFOwsuqODLOL1FYN)4i$i)V=g^lrgUay?Oz_ym-41xIsVa7#MPSE4QmchU8P)5%}l z|Ih7SIOB%##Rwb!i95@7V%;pq-2}?Hmix(+O^kuM={DP)7)w?q7P5^nA$(z)^5clX z{7z;{Y(S)9Brobn+~UH#hJV291gG+QJeBFk_2H-T8@M!PXnc3H1ZJ2E@#ai3q~E17 z>l1>+r^Fq01V4z!4ImzY>t7;rId&vbhn3@ZnOEZjVohUDE%jDNGdi>D)wZ&dhfr z7V|Z@QS4i`1%C^9PQST_iP1>Y{hp}D|0O@6k2WEubKlut+$G!uw~;Bt2(CPml^slH zE|n-KSP3qFXMvU~L9W3xz!Ja6ICj1?Z{+rK0%){Xp%ci2HtdBhWGkd_m!=vBW((IK zd9Wfa5ROM`X0EU|?rbhW3qehE!7sw0A_AGzYbEbQ(}hcM0(GYXg1g9N>MANvPeE6_ zDw-{6CfzAX71a`UMwjm+cuMDp&tT*IuZYG+ScHY4#VsL0qzF=q_6v{Ud-nz-rH5Dm zmc}7*I%c0W!u<3zS|z%V{dQ$xNU&F!Nz1V>e=C|tD}-U3qj+>Ew2F-GS63@&Inr58z&R8?KQ3h;v+XB%+Oi z8}u;zxA!EP^V3kH`M7gjGhWNTWW{jAUzVuOs^F`oBB5q;Uu=_FbEW;#hiM)*jqN@|5a8fN4MBKQJ>Fj$epR zjn9uYk4}$Vij<2@NGxL8bA8!jiKo#Pk?e4OY}2cUAHdIKXK-WWMPeVHMNXiOlV|wR zOpR#G;7y>IoH{*rb(*!R_qnpv=1(V1U#6EsHKalT@q|q+KJW>o5 zL?!VIX+Cf)K1j=oR|?(KPD&`)BPfa;T?V~Wd|DciUx9CfL$O9iNg9auAm^%-c!lJq zbSXAA3*;RYEx@Bp1&1PArjRZcpFl3^>i-dS7I0EqT^mm_KI_8n;_luShvE*!-J!*b zyK9RScZyqaC|0z%6?b=CSk}fh$@kCDz908D!)zv#$=rMHx#u2v9x%Jd;}6Pat%Hwe z4Uf;3&*sgh?#7`86HjxPZo9@HG z`NH^(vA4Y?q{j(Ww;~>e59nt}e z+=}{My0%hBag|U=Fo9C@L|84h#KsFq4p@cv!=7CM+}dK|MEpJD*jd}u{lWw>r!+xY zL+(W((6&y)$W#TXg z_CuFOtI4d|UBvQYJK;NZFr0@a)hR+A>9KCMp_u7Uv)!`V+QypOGR~CGxXn<)IL*}3 zQX%Y0_>G88k@q8KM751R8~sDHE~;Ndd7?PKgr`ItjOrJ&Berf_+qf(+z{ym%?ujZD zd5>C`Ct+K{<0Dr@t&4V&7cvceiB(Z+B5Ow64qIV8WobvgSps#`*TV{u`!gdf14$hP z8`B)qUu2QJ#&h~3^??=OwU2{+)6)9PIEoEW{Hqp(~z0uWzEAUUGv!4C~S!p_UMWd0p z4alWc*I)M@tF^EGdwrU2DfvNq)*1_O?`mWSb>hfBx~p8HxbCdfN%DyE#oXdSa)aUp zpQ@^{LT)^8i9%34rZ!S9DeXa@h*SzIUD>y!D-LA=JECjgP(%kN__Gl0K8z>D@9X3L zf<@Uq5E1yz=$GySud~n>UT!N_OgFkND^NW#6~HKHly48!Nd#xxR7R-Mip+ ze&JaM$F$xv%>B13(ls9ZiYpG)aoL&SGI{>=-0+Nrq155Z!aG%*dC2eR?eN;)+rM`_ 
zcl_dvbMe^+Fr94-qrecEQjw9Kz$nnKq2Hq=>F*Z#aRU8#D0!}j{c5vj&MhJM@Q#&*F${k zi@bw;-}>jm?e!4(p6jdS9}qYid@Qd+Ci^n8GhrLJuS=U zOY|7(tiMF}in$PTAMA<7)W;r;PKvG+)f0xo&Q#hggPpuSoYZr}ZiH&6Yf_02Ykf?G zVGa1FYfv5C+&tdg7%zBHJe#9Hy8RIe%rw3>b~pZM$Zxnu6sDWLus)0$7`;BA6JePg zMr11zL2eObeJ!%@RWWVIr-43DT^*rsno47;Cx2^^(mscU#^kc>Huwc)bv zW~_owGzrY{dU#AN)PR*FUeJJef!S07Eu2oS&^fs3-|52)4ME(UW~gU)rthMEtDCH= zq6^bK#4pxCs=^LB3hP@E8wi`!1od|~RqSMA+?SUSQyd*k56lUCPkqfNzn)0Wj=+aN zM&N2-TA*OyoWDCbJY#%b?=l$q`hfP_#+%!F-!sNz_RMt`ahu%b;5q)_j`bAuR3Q>R z3=epFPi?GAk9#52iR)c`sLT1*<#irH5_&spI0KH~iNQRw|7Cw`@90Q(^mcA>?ssmY zdTfBRqqB}P2i(|eiP5w{TEZMJ>{M#mufu?SEpuvSv&{UNX7b|NW^T?*&1_)13BKGe z`yvoBEaWJ?wXL$1rtWNU<{aXRshN3TT@`J+Gb@3PSByHK-RU+MQgae#oNns{TXG@i zVP}-9E!^mJ+$va^|9Fr2miuc3_JRCV0gvgkU=}QvEAlSojar%&Zoe)~A7!)~mz#>1 z@0*udYJ#lweb|ezz2Pg!^UfW$Gb%Q^4LOYiqnnVa@geFg2o*QTb?g-VB)WIZ^O!ub zm8j2|7JD%^E%s^bN?zxnTI@_rpBP81l}R-k*wZ+rj@2kCNH@u zL(Kclc98h`f~wjD47vlz-!pQ|vzgp5YYs4_nI@Tamd@N&Ez2`=Q~dHlW@|cr_wjsZ z17^i*{ZRc&^raT{iIDc;J%0w{a&sX{xWpRXTFt5+QVPI-kwrcp{2A}a6Hsb81;PUR z{T=*za>~p4F5tl%bZx4a8ceFPqhT#7LL!3hv=>A zj+5kKRst{hqJ4{fioLtNwY{yqIULT}>`AQUIqYw3b8S^@p3DQ8!!vti{+4+m^D&Pj znP=c-pOaYP|2A7tjWZLxK-+w6JC z66x(I?`-Yr=q~9od*gfs{SN==fH$xSpZjaMwsK7QnLSNavA9%FS6u&{p$75pappeo z8Ck9Qi61uzD;jQ~s^)Iw_NZ?`qUehp&WfJN|1GISOan_g8kF($*qU)8z_l9>gH6e} zFR_2d&PO}`8rui7fHyIV$tAi*US>2oZY#hEcp3RqIBd0UbA0%M$G!1E*xIoA!M9A&}TRFhGAhJzVVej?h!TZWLyPD zbdWeu9;#rA(XyHXWM{aHMUd7FNS@#kpfYD8HV7)!zaSqR>)W5)T0m3kXi zKzX8%W;{nnz)HL?P8J)9F?bB>3Gd;78OeV8DJZVl@G%Y|=KVOhGdMDs7@R{T>_mS_ zvMf%K&pQlGx||^2|4MW(C-Rr(`Q*v;2;O*a0dF3!mq=et&ujN8_egg?ByoVdo4dL@ zmpdEyuLIoE+#|rrZ{lw5ZU)0;ad!!@ri!{VTD9ws*EMtoOI=5suT2EzT<3 zOMTZ^*C!D1R)Lsu&-2jx4&Py(V1Ag;j}TqjEN94V@f&5R{qc$I*S*z6=}+i)8(JHG zH=0c0<{p;*;CMqV5I)rR7 z)yc!06*DPjNX+DzKVtUA42;P@eq}K38b!IN;a&w+d5*}n5z#P7N0DdQF061^I9B3G z&<(?^o5?8iuzQ&dll*b=abJWG4UdD+q;hYwk*eWjHqJpKcGcI?=Oce{AN8rjcnraR zb4Oai4&xkqkekvADW7gEvCd9-g04vqB#9mTApCye#5M=v<7rElML#moVnDU)1vmL9 zs&d*G5~)4RLq?F_P~4aogr3q^1S7Byaua2Y;7B4C>^!o0FR?ls_0Nfh7t;5l8lsi{ zF&_G=?4ydnMjy#;Dv_9MilFU)>Qb5bx3Y~apr(q6olPtGLvRw_*AIc+?68LfdK2GM z{OkR-;q|zR^?1(r&gb)e^liX4JmCEX%Wa9x^gapgx@1DkI0=8byf9`@piOl zeCp$GJ7pMPtZOQ2jfTQKePWSLhW8$AZ@phMs$ zmWo^)`65yeVy7YMIVk4UBQvOz*aITjy9gsS6k(C~z#B^^GdF{bv(02vyd+0*Tv%k- zPBP1^)~jUB?If$Ci{+9z+Wd>DmZ>agC(&@pTmw<3sxfFt#u~ha?N%IQ`)m3w)Fs5} zpOLk*5e@k>I%_u`q-{uQC*AkRN-{F~TrwcFHPB``bRz6UGJBF(Jm>Y%wO7a}Uy6U- zFL|T{>K9sYm2*0u&V#01h7NY=8nd6Nq<^X##XiQsd3s$gR_E?yPv#;!k2ja@V$^Aw6WNO;9^gF4^0qQxAquEWre1q585laUgB}ED6I-hgd>iON z9nJ&%Jg;DF?d#9$f9_l1>*Z_WtLrNTzo)~S$m<5aOz$XfR-|$+8ne1*ho>YO^&ez# zhPy8kHpo2-yX-GdLGNeJw`Bjqs^ETtB#b1N=N@v|5nhNa9QC^sJ$~M~;NI>2%{|;b z#r>!It=r>H!!FgqCOh6U$?@1+XAroj)j{R3_{)Kh@X=qK zx{nc9Ew^E(tQ8zh)aMit$_DZ$d86_Ty|xnX^bOSyhW{}96#}uW^I{b;r(Z}WYE1US zQn(Y$z0NQP#TuKVH`WnJ{>C&06s8PQ9Z)sDkRklNrH|!zDh^HTOvZypwU)Jb8@r(h zu*o`-@wf!^;6KTuJV>_MVJZ~|gNpw>*ki-PhL8sr8yGmuf*gE@8sw4PqnG~_Z_sQSNXTt?PoWtiHN4QCDO&~*(BN$fkW60PZj z|1h9?0P52)_QN@J5$v6PQo8hxc+qjZMhm6ccyxw?#8?HNdW0n4*Ss(82bFO?xw&iM z+nOo<0xG}^BB)Kpx?%@$AimVLNL(Yaj+h_*qRYZD+J5R2m(rGyBhgd%flRd~Fo?tn zX=G2{BG-I9zM?MF;uKMX|-D^@EC-S>4B+%#euCH$sU{;yiT@KU$W;3(NPcFn7^{gIk2XiQ{%BbXdtTIh}KOW zMb&T*+Fbd8oKtBG4pM!kf>Ie2t^vwy>Wu@+cc8K6RwH54n5EvwhmjzZ6`GO>)Dgs* zb;1`ihRTa&Xw}5lc!RgWsu)iUuPh_nTZxjC>uthA@&?@#q0gx= zq%X_b+ZMd);bc{;p}t@@=waK*YPd`u_YHD)QmHSn>ofJLK7sld6ZsJ~y_GzP99WhK z=*REaO;}mc6?QEdoEuGzOceWzzx9{++YB1s39=?8vCgli+HjG6vi@h*+m7tXd$7`e ztB)i?^NIEarg#ww!3Z!?4xk_V>ISmrPv9|{m`!h8L+-v1UY(-&I0b4HMf{l`h*RET z#=Z?n_fF({1vRvzsl@r29HA~!JE;}%@{(j$ri!n{S0Zr+@r<|^UfS8>7$m&EI7l2y 
z6=M^zFy6{9!ZYC-87gaqIb^DgCNsDZJq;1FPy}QkHLD`1ZdBxuTihN4mBd;JnI2t5_!Hjw< zxiPJtTwcxtszHLBQ_jV61eils*&xe7H*7WO=&iTGSE2Si_%L`IG>*Ic|A1H5gE!$# zyc7Hf_T5ut$X(&p1s?Z<&x21m?<()#;s5J=;x)NHUxFWlpUDo9gZ^L!*%h83#1mvM z8ObKi#%?cCjzLNb%O&J8)PU4wezupp$i0Z~50ZzIuk@R|oVY69VRR20-E3%b@PI1R87>=M-acvr50B! zs|{Jf+R&Tb)uHt6Qu=tKx(Vxlm--jFi#q9VC3y4LTPgT%*&C!B&U?r)~`tTF-(nsteb`iT#fjtc5s9({RGibkn z9xz56hg7X0+PR$Llf_y5zfs(dv>o8Fk%*S2VgDen@=tMxxB*SOKwKEox$DH=LBRM; zoX)2vlYu#fx}A}%^Mld7efX|Uq1H}pDgHq9L|yQzs_dyN%HZ^A!NeCnkE29N=qS6;1z!N2f|(2 z171B99tpQWhPedS;sxQnaF%-XQ}p8@`f~?uCy%XYpyk2>`g%_2F_W1yP8bQw88DdOau&D~N(G<)O8up|*sa{RL#LYHdER7VxaS zo=eQ(H`<(kk1yoBC46c**I2>xQkW$d^Q`^7jCU6Q@747IOk zF5(;hyY_GDOeA>P|JqdYnI>UvjOQ_l*V

U?v$(8_EA8`F~ib4Z&6($YTg|Kx^93 zAsiXVv-WC$`g7>@P+kuToi~CkQ0=Odx$0Ch5q_b~&TW!!Vqatb+Unm;}# z$Fnh?^ZwfkARGC3Cl~MK3!R^rxn7V*UXJADnAY-hM3bY!WN(y4s><+OnrH3*GN5Ty z5Go4g$pQQB-&T&lDxoc^ldV{t|Etid@wyhRhVVT{v{vokroCU8D`;1#6lxW?*0*Hl zYu{9cXYF+cTPwY+r?2&bkyl1uVXV>nG8)j&XehKGqvHua>@Jg$V~=4$Bm zHSA~Yj9VQ4hjt}&mUiZU?FR2^SG*It-W|T@5z_V`^uPAaUtc{7$=(ZO@D=y@f%|>Q z{XOR|?T(-GAghnxL+PpdnOEAc`GlUxprwb}*H>vAOXZhq|9=UcpH5#mR2O~Z=Gj3{ zeSM|<@1=b`rb)T>m!BThl|C%N}nuSp$Bt^yezi{aNV#w70x36dX_ZjVs zuT9gU1wwN}`(Kkdt>t0nXh$F$?xJO9nP|35>!hBnIzgzmK< z*VX2l_Kn)NXs`aeOHJ~NaMst~q`j}5t2ONkUuUZJZT~+DvWDh?rY*Goiem;uhIEP6 zkKv)7H4BDNk88dDwTX;Lknz#RNgF{o8p*@6Gc=;wYuCThugw6h{U;e;>&x!j@$oz?FyZx&8@G;wBvTp)Xx3y)z>S0optFU$q4OVx zjy~j5&q6a=)8ntX_Uq8x(X_cXSKjm8nnu^=PYNxSp3wA!cIW^760JwHc@X$tBOy1` z^ofn73&5EQ& z@m<2bUR&rV46cy;8bZI+N>7M1O^5OVBBdJMqTIh8KvYE*^ze5tH*&f?T`^ zNztp&8R;B;-LY`D!cdY-Dim!S(ks5`ZjHs0!HZszhHIX|c7upIOgM4W3Xa83- zX%=Y|uM42r@*x?Kp^=V9Qx<2vFBFoSXg-q_DanDB%@z8j_E%QEBc4xaM>Ppb;#)NB zoICVa7LH`)y|1>wS9>Q9e`(+Sb)C>;Qd>i^@_9{XYS)P6`(rsLKX;wPE6s+=!7ELB zYL-Mi_y6CFAY937P3ARgL;D@tN~O)U2+mL99ZjA!>6DN&t)H~lU+1vapAvs3a+Y2Q zAcYpr)+}>@KC|-B#y}x!N3%sJhNbz@!7h$!eXdEYCaIc5;$;j4uA%8tU+7&KS#|T; z$FU5g)64a>an@dYLTAgopBmCsHm)1wsAk1zE4Z8YCBD&tyu3u_Qn*Ht>#O|#0sG)J z=V@0@=bbdJn1+mf;nUh)LX$yFDpUAdTi<=0r5*XIp)5)p=kcs*O9SVt^q!U1 znnf288tJIec_!Y~bABXa`PEj|#!ed@O*h5yO4GTKyow2pXf&gw$r_}6jA~Y{6wWox zjD8|gqe)Ie=!g+%$c0@|0vXg~DJycO*`!}3P_x&waBjhn1yYFP+I7OXW-g9tt9sth znVP(PwPCdx5+9l|+4#(N*uMohTa%M)AvxFNHwjtQWLlHJIIgCrU%uL{5}(v8;Rt#{ zM*58Ojh{Zz>|75$EAehI?!v5vkQpljuPj_avkNqd^)Uuc`ZkJt2&1PpJI{%9X%?A@ zYq}UiZ4cuPW%)!4Hni7Ej%vS1JO4BK?KyG~!S74tJ1G&MW$+120|vN{Xs#R+x*HeY zqshD`lbMXOCeK!`V4Xdu>^+YQ{+jL6(?6!`1~F9 zgnF7!Y4WDo?>02%U999=|E}q;kiZn~?iSZc3(5I?&i)jVx%d2rXB>aTcYWa48}zSc zRX^i9G`ZJinP%%}@=G-h?ch5+T>nPsjwF6jJa;a0KObohBv8{`+OO1nwU0O+PH$*q z;^T96u4!XTv~}1>mXwR%|Ay~+!zYb=DweSb@@-#dos)0XtQ>6|^jtMsNM)bCpZk-L zH4A?|$7Z?4Zu0^Cm&ADAd$`Q9sSj`5vDt$SyK3 z^7>U1eL(v-k+&G4)rV;*YJTQLB6sweUW^nRjQl!&f3#48?@Hx&IyoaJwLd%6gPfyj zZp}stvXj!h84uK4%vK+LbCJ=~^xHe;fo5ZC{qEu0j^PtaQ%kd7&5Cro=%Zw$;WPKu zh}9_*>9BKE1EU*MwUy^Fo{Bw;XGi=nN&JpiT=5lSmc*~@$ao&b*Y;MeK@_Ajv-T~r zYsI@fL}*XnzE#hv=eUO~+<{ei!0&j&yP6h=~EA6CcE9wS~}_J2-(f zm*wuZ;m_NwmY_$?!bCiLd(;Z}<$j{~ALG9nt+oR94mN^1=QtEPpI|r8FoevC*bF^;n5jE-@aAzFrw9Y{|-LGL(J|D^x#}I ziMshF_^@WdYd(#6FU*U!<69j0`xA-PR@obj zhes_*ZERij3N?aP$tB%MAEhCI^Y9z}f`{lMqf`qC+fRP)e8xOg%|4g8Zny?fP)PBBu3TPN(xONWW;cs|vws4p_tl`Q%wJH9}IzlBiR_)ET7xC^0B1JWX zIdDAeQZ>usA+pgyn8df|Q4Q={F3`U_)gy{kT`IW6=j5`#R{m1&povP|cj?RL^X*>SO1&ml}s{2ag*~~9);`jAYzsE~AMfjCU5LH-4HDrIaBi_iz zNLFp(IZ`xM{YmX9B#PsOhSXS;pvJnk7)_Pe6QwtO)Sdo(!S5`C+&x#T<8^(kmd39= zn2hHQ{KgLS@>O9vHFuY(Tbjx*yMh1ukotkTxZhPf-ta_z&qln#C&;kxMGR+wI#Dbm z-KP?#gtA{9kB9gbe2pj7d15gsL#V8-SK6w5xc@j&Cpy9WY)r+yPn{(cCF1sqdA1gx z%5V0bO1_{-q)naYx0!Ax-bpoSerI!i`Psu?n0IQ-=NLR9}Yw(F@ zQ;&eA(4Jc9L*hNP4yg3=6>8Sizl9rOR`D<8RdAyGR9#OzWjV5wTRn+)KM^F5d{Qp4 ziJG4*i3DK@QI{{mMeem2NTuh%=<9@JzY=0ah02pP$~JW|vfC6-@h_StRQr;NJBu3G zU7}gKBa~6%h?ebDPfEFUAB0uP4`4VX2&a(lbwUSmiQrahQ=RloZ7sYJt`qk;or!QLu#l20-lF3sE4_)1 zB&of`9@02*v|3hqtjra@7mEq+k)-z2GrmwWiMfOcWkByQB$Nf4sDiK;ZIKuMcn`6K zxItK?PE=x$=CYzemm=L2Mk?Fn^@>OBjmAvlJ7=O(lkfnSqzbx-)K18T9_)s$+rq4B zg+IQxFn}4pgsQ1bR>g zSM#e2mAy((oeM7w-VNobJXPI5Wzjdn7vgXC%KoZznGSsS_v--$KBYpWx~E4Sp^YBG5qZPn3o5oL=|UTP%fQEMrS)S5)7 zCWB#M6f5XdakSjfKR>uzI&IV$_N)HD*kDt&7&=50{$cHH#Yz%Qjnz12=?e7=_=3-s z^LvG6%Rm+sPp$sKLdP!_5dBpl^5hC&(ga^83l2eJ6 zgUT<&yB;v>>JTpME0kA~<-4kp2x~WCpA0tuSKlX<6MsUo9}3CBb>%(PlaIt{dW-I; z@(N6WrNZyhChF)@qi$yHI*O0u}EeO9jE>xG-2ebDpTZY>LjTU=#Mo*IL$1%(DwkJk|k3HKE@e8>qxbty^rT0Bo3Qhw~MA!;A$ 
zOw)yly4kvo!WFrj@~oZDRD>#E!~P>Px36ouovNK%%4^!CSt( z@^tBz-X?X&CVZ|IQR)Y;%UPrwy1IfGEF!;@oQ8T*Qt+66rn(($gw|^R!06y|ArIpe zPnGvOxt)+eynmpWhY{^7=aJ^>-%Gb(od~F5qC*`iCkAK8F5!+&uiqk`S7yom6_+{& zE8x9aif#5wbxNS3uR(C0G{%_AkSgevab!^hgLC9Q!ZF;FzX)fz;~c6aHW9L;0nB6r z^b}&jGRUhKv5k+2O~|cE5KAjlgO7tdlK!SI&Ltcp!XK%OlKT*OAFTEwQmBJ!ASWu%Ivixo>P7j%S-WMJ9q>8tJ^^aKMf{SVRfD`6)dxs>~BX3PAta+ zwY>Z&a3Elk`%zJTNO~Y9gC3JW1ilS6)F7INlM-3~8gPr*r^!(z1_Hhg&`Qi~(C#`bZ6NHbi%7yM4C zE3_7~h{M@+#Inc!hJC>!B}uLpye;<@o9GYg_Nxa$Kbay}jjc>RX@Bsz&lF4*8o?L# zS(+fW7j`K+`DgjG@TdN)A-`^-x<`IORpUkFk$PJ!uiGh|6Mj&Op|!85)rC{yBwbhC z5}}?vC^%E;Csv{=XqoPJv6S*CAjx}$&bo@MYDV!PyOmh76u1X=VU<`PTB+~IZfL8t7j{eE>3V^X5J`qc zWra8}{Dco#`>Rn$zC!UR&DCDi;Z9&*Qb6se)K$i+W5t7H;%pTjVZXm&2QgPJ8yp~) z7gEFq;tu5MvHFjY0FwKB@dHu+aoGHGShIrcLf#1TiSb{da=4V*TRbMIf=SNn5Bha# zcHIvBeBD^_6E^jHbsE*(R;i&F5B9-M_Mf@c-AIQ4-o$3YBRKp%D^sPLpy1iWtGx3; zJ`GYw```&>zo_xDyF|}1x&L6;ub0e?yKj-AEj&JNA^K8kkBXc87kQnr8~R-`$9*l z9V2e$67wrK37miqUu1eGzwHf$x?($tKpLrs6_YBe?&_o}MO6L35WDMk~{T z{emx*?I7K(Q|D8$H%DkDm6jTa7I1}TQ2)G1?n51aS?VM2s!!#sUM0u&+5cZ0uzx;Cp2j%91 zOZrT`l1YkCE(fj$t%4$1_4##);sd3<+*3|b!o;0Y5#1CiTKpNU{fBIq4eBXju{2h< zUDM@qO<2y>$^D@gM07 zcn(Wct5SpPqIF6op)va7jG9lm7SyVpJ_WxOCg?JC&xAX{{DD!y{p`!i2%m*i5MHXQ z^@5E9dE{(@6}*(sR*ldg0 zZ*)bL$ABTS3g*iQEQ0FbCDdakZ=iy8xjI`;55xuA$UeC%n4^7VJ$tQU?4Md;6>8&H zjJcFZp7RZ59Uh$z>Q+URiv}MB=PL2S8g^3!REyG79xAU;vXC>GB>V-}Ykj2=KADqP z_fb+B-T;gG7+k2Ga(Rxw5$aM;TMrwmAb1F)&`w3+Ospz(6aPRf-xADV7FR-QZ#zj63=h7-Zq zz^_lnSDA`G@geqXMfRW@lzezV#*hVk9lydgM$1YD*H%z7++K$PL z*<2IJx%wT;ryBmPuEHjT6`+rBF9T7uEaZ%O{g!N5b|>OYw@0q`0>N@3%rlQKW9RlT=E4jV<~U z%Ct{#fVEU6DzA`+*Q{TQgi*u}j$moF+xFA{$zy>mPb#FP^lstW?waRd3MF9%f5J zJg+O%rg*ZNkWaRhHS{^wtLE!h@Jn_RekYr(oNzys;k5;?S~J2;4;5MY3IAyv9%v7E-PTgw|u;cl|A|2~9QY9Zd~Hi9b5 z0tdDaypN;s7w5p=s{<=)H}B8KPx%txUk~9A;ihnde9-+|$-wIL1g+7FzHf}@bUN<^ zgnPn|L@*xVnX1Sb=42LK;_9#QMt^1o?G|+Col*F=W%gtp*$F2wf8OI|oWp$DL(i{4 z*7hS4TE^l`dU!eiSH)|%jXU+=lO4)0=*7q`=a(+PA6}W9q`dg)()dkF$mmMIXL1?O z))Qv^6g)yv_z%L6-C>}9m!tO&@ylDFHKNG@JdB62I6c11qNL>_ia1#scy7EYQN%rPVHs%3(227&w^^w0)nk+KuqBP-ghGdA5p zMy5V>hXr_Ujcl|c>rL}dHshEG{%Ik+jx`wXqEv+S#P53%Z}fhA;rH=(yO_TZIINYw zlvWFpwSJplca4gw2hdd>pbf2LH+Cbx(MPYZ z#V4%AbTqG5HMKDJJ(N2>g?g-`9UAG1xEak*o$S%B;*VUvEuY%Y(Sza~GFPXPi#iJ)%4#7g-%VEkQasPA@azsn z-t)8bx^F;c#1T0xu>8%>{_&a5-G6TFq9pg6v z|MfWJK&PI?-?yAz{tam<3Rc|?D%ZBk*W~xCI=AJ+WJV8>>yQI2$*JTmugChT9L$ZE z>07D;CIpuS*MKTI23*m?qyePTzJN8lH?W*6{9S>~)Cx=?`~Gzx5O^Or5?CEr36sLu zKqD#%;%N~9r~khHqW>OM1i1p=z_CyoM$K}8l7R&3tc`&JfqH?~v|ivtUJ2L(VZk!N z5meFW<$`iGa-b){)^Je%A~!;Ii!x7MufZ+({mNZt)%x(;40L zj(IW!NqEf4QAoKd_m;l|hX%ugM*>}7<{Lo@*DCPS?o-d>@vH)M_@TS0`!Mxt+nt4- zYaH>84RF{e+veLU+P-9Nf;T|O+?&xY!<4Zh{oC|AY2(szr0q+s4HL!cFO3M9-hbOWPn|v${`3hRv!=<7l7C8Wn;f0|@?-F0h2#>+Pe1Bnm<*DCie6&S57rEBE$9c+oA9;KG!u@akF9Yv`ujTv7T|Ddy zrD6I$#**eUmeyf%_>#zbQ3s+s$B41J!PGbwXNhkee=@#h7Jru62_F;kB(_gnl(;W( zSz@C^Z^EsFM+xqPh{X8Bu*CF)LkVpXu4l=frBVFAxOuUgV}6ew9@RXuLBtQ?b;3$n zt5{aSr&GwRzyh!v)TuC2U1N1a5q$w&X{ox{L)eREx{aPH$Ug2ezQcF2MX9DtK}(s@ zN;yEzzfYF^P5$bGZBv`|D=4s+M@M9nCP_~4k^H)O`ZDk&FEX~I23Ij3vs4F(__g&L z>e|;MU*kbxZx>b(?tl;0G;1+<1~S50g#Q_Cg7uFjlj*oIcD|N!2ZB-W_7PX6qqi@{7?M(0&@eg!5zWM z@)r3S^{y`2K~=>O@P+$=Jt(6c%EMK0ikUw$&@xbiE0+(%25usCk^c3*BCwN|@gDS) z^PEKH-hp6kajk|6?Tn)~d;kUPb8U{yUYWj(%Nb`uMBkJ#hPv?1p!_Fh+)LjArvF$t zCoZP7gJa?_+&(!|!&9rIj!Rul+njnGW}oo1UXJ4VfmQ~ z+fQ<8{j_yyPt)?l(9$l$mC+;fe&$|Vb_WzgZlBi}@B}9-J%}-X06%OJ+4(EQK2k0A zgsb#54S9{_OkK@OENRwZpt(dxJ&l?aoj+z<%!8Oc@G4h`F~l5#W1=>^-o}`IG0S4M zgJEznW=>3&m<`cC!Ishq&Xm~b$M7rWk6I7E=dy^Xh+*O9;D)GXZEb06u50RJJj?v| zQ19cWmVKyUxnVPHsbL6X`awTkZ_$sSBDbP+5}T<4wGqw5dEz^<4VW!IQzPD(+S~jf 
z5Kl*sOb1Cr4+q|_#@isQP5`xkJq%H|O%dkW@IRe27qlz|NxK^O)nBNQUj*B&FYG%w zm>0lf)hXN*J|7;_AFK!A4zioCasDN9vN;QXHL=tLW5Evla((kDSj`3-|2F6i&Gp-; zvYZE^(Vwi8Eya%9UtdsxyHYEW#BQJ&dmGJf_!bL6CQ@7;E6GSia4(icLv{#X*g<>( z=Pv@C)tT5W-Vb8NRhTvZP%(4~|7bq(7Jh~;Ds}U?FSxq8+|-U| zbuI$)rHlPCTJK<>(_IZs@5f&ju@sDdu8yx#-?e(<3)T>~ZeM=gGD*fBI*E62G1~4Fr~u8u@0p9u_ARzqd8&HmiY98l zR!A?o_pYD|{s~s#_aI%2f#L24xSgu%%YlG8TK`!;6=dL@;8yQ46*Dh2KQ-%NO6!Sa z?zXIfqI>2aVG%)smYqj3UwU4CO}*3FhM_OvjSGNl-oP=on`8DiHD#Lv-6 zx10Oxjt6WmnHUAwhnB>auR^YYR<)pIJ!$!JL2^1?Vi#`1A`Dk2;B{ywzeT5A2En(6 z(u&w*LH6XMsb_mDU6oGKR>GUIjKtAj$T3+96F@9hR#EKEF2wDl$xyh(j<6Q?Vk;jhF8R^MV$An~Kgjwo1;4n}p{?b{*HPm&-Rn0vIzP^3#b5!=;aj$gOcbna* zuIHeyZ+0zmEpYutrFnbs5n8x9Q8zw{TJd+TFzVDF@@_fTVP}$avcur`!(P|^)YjXU zkvTZ?6HF=vGTiA`k;VGyR(Mc1q%}=@4--`}c+oPyluo^$YEPSyF~&CEG1V0b-qK$0 zJMS~^MONtwzR$j=erKS#JVe>0-V|Pm0jV4wu`k93=CziW)=y#U!vio(*uwt~Umm_W zJT<&0oVL3oCq|WzUJuLE#ONB)HrRZOQTyRFyB6t*jDtU_c2xbSD0F1@$TcuL4Gup9 zuX2CuWw<{JnFoQfzZpiH;>NP@hkY`Zq$0PF$!dCt9d(Y%$e;9|L9{BS`+`=Creg6N z@>`$E#{+29OzI=E7;1pO@)uY_GT7I7jTOK+?*kJ1QKL)^`~=fl(|*%A(@j?YCva2c zGIyo=_mw#woTF}*0hS+OlgtSh$}0HO{G2(;)GWmFm=h-4C{s8{>{me7=*+$O4d6bY z8~>&nH~_*{LERf^H&u}hrEg$wtO}l65fEOp0s;Jj-NRVsR4euvt*AF!FJ7m5^aGmj zy=W)WVq%vOPh_&1)E=b5rC5Hqq%V?6g|uELgAnJFQjpROyw^m^C8diu(VDk-{6VHe zSE?#=Ba!XM`Z`DbQ-t~pMQC%VZ5$dr8^{s(-T$rssc)9AmQN2Z=~!=R5C&JUI$wl2 z{F7@EI0b9KfO+j$;%Mf`1}mi9{ti5hf9%igLAyDGk(FW>K-I{wU$dV86XvG<9K0!O z?d$BT?9=Q$?Dav0$%3SAXw) zN5LJ|G24E|OZE}3!+T;&7oB`fJq5v%G>o$PbnP<^6dKgiJMjm6PX6-^(EV~r|n`CE|C zyYScxVr41tFft5fk-Yw};Q5S=Ouw1_fs=2F`6h@$0kaVqD?=*)Q(10WnB|puGq^^% z&F@VoOgphl&zo$fvanukGG8{^%t`3Vbn_zK--s0UGhQ{cfFa;1b;RSSDz1yZjn+Q| zcX%ZFxIU67OV9B6t_5{SPdvBL-=6=LILD$l(>;SZHAats>wRgM9M3DL>4=d zF|&YG;}|+qAevAfE~z*Y6}BpZCmD!%-bLZEw-8LgOJFaj$V%GjuDPVjvPqi-}afT<&9{q z?S0@fJ&U}SfZb=Fx9ZEIbD9{lnXKj%m~!i2cg+qHVcrSfaX-YBiVMabitQU)Ha0BwMa;|?HM&vs(5SJIeIr_gw@3bpSe=$kP>w2t zd2rJD*_zJobEUO4e0Q5HrD3OyH2-8e4YTJas>FBdJAj_w6b{8bdZS?`tL{EyVbfI8 z8B?;!g_e5{8qh0KDqIi==Ax{*1FdszXo|x0j z9`i@@3G*a#dGl-22z2t##HeTwL}XB#8YUhnq*1Hp9DnX~yxg;}j3@C|w0H;Wunl<__lV3b!B^4^3}ipv zgDFZAkPu=NBUl}Z?1Xu53o(XhI8+7&!-8w^OI(33*Wnuh)81hCNG5wMo;B`9?yPQ| zJK%D=94?p3!}ELBRo5=p9M{jRxn)2jGP_)?)&Dq;ICnd@(GEMGJ9C1ob;DJ_J&8l zpthry>ydkrw}gKzSpap2InBaXyiXk~^kV-|Usqe-0Dbhv*b^kvdgk3&V*M<3%St$K z_Ju`-_h98dAATYHT==cvD_3yajfVN#@a@`5q1V zJXV{#m@As|nnm*iSVk_uY2xH@!n6<`(8?hW&?Ck(#uRoa!XGG#6CXey~8b zV}IqNV!R`=_8zS^5APfC1Z+ng`N=7G6rZs>bm48*GNiQ{7bAO{t4No&mkCo};0d?} zZ{BS@wW;LkU4aL89iGi2${D=3k@!AmQ@uQ&=*>F(tv};OjX@uNA~RqT%pIi_4|3R0 zR)Xh((?N{+LJq+k_UH`)Mc9FR@SCmock{>i4}cQ$);rHz8*ZIFaGF^?yWDl$uUxZ1 zs?@NBt~gIS&%*KakMq3qm~#)fSc^aw8V37bPiHr0JMac`INgr>j$@9Ejs=dHXwC_a zL1@@6j-Ir(j;4+djvz<)bvbK0tbr*+>R3!mCh$0x@{@LK13I{119Cd+-*cVJ7;1DU5k z_&tR{OgUgUZaik%Xr5qcWVKs6!%%lMEG~R7@>K>#+dPpg;LMdHZLGsPz`80H`6^;% zMB9j*5h>w^!h45%!}^C^K_9My@7rO1XO=9@EJrL^V0G+>gw(X=wkBIP!W5Rx@&QJj z!)UFu`23E7iFMh0mA&m@7)SG2rdsY=;;mh+N3G#__L}2?>BV*1hm{PIU@E%{v)-@P z7BD)$u*^ZfzA^8G4{y4891K09KmzQ79`vz~>BMg3j&T-9z)uW|K~^dV^5p=%Reu6b zl^nWj;7l(jkM97C$=9S;QYv_>_jtVxU*0d+hC^6cyTOf76)rNXbRS=GCt}>$!4imP zx8DH`xC%XZiMZ8EFf^`+xA8?kz}LJCZP|tR+;d_hUD@yCAvzJo9;7AN&m(EWh>$cO z!&|Eh$x3|3MzrJW z-D}{ncR>m-`bYUA{0rGjpY)Ezo0A5{;33*J5QB!n+&qdlz|)84#bCbe_Z;z@rfu_# z^EC7n0BcBgJKX_yR?sl}dVcY&@%#m*^&5}J6A4OOG4FTYhG56^^|t29ao`w!uoYil z1A1u&QI16XK@MR!7I_zGYsdoE!^$3|^Ri2JfhSc-*BVrp*+k@LgC8{%?39JN?W}kO z^waeBKp`y)w&Xxif`)@gIT%FJWq4>lumj0s%xa9`Q4lQCLdF{GanBf?#$4ErTfjKV zYp#iGjYp$SL&^r5e?`ZhWc?K^MZrC)20l?ueAiuQ{a_mZ+w#=%0(P^fmiPRB*0Ra6 z&@#(12HF1xEcB1a{8V#8bAnkz16*tx2Ug|JusL>yqq({%31qm{V6&ApdJU%yGuXQn 
[base85-encoded GIT binary patch data omitted (binary file contents, not human-readable)]
diff --git a/examples/utils/logger.ts b/examples/utils/logger.ts index 59bc9fd8..2a1e8afe 100644 --- a/examples/utils/logger.ts +++ b/examples/utils/logger.ts @@ -1,4 +1,5 @@ -export const log = (message: string) => { +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export const log = (message: any) => { // eslint-disable-next-line no-console console.log(message); };
From a6522675c34d8de92362057cdbb80af7132dfda2 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 28 Feb 2022 11:42:53 +0300 Subject: [PATCH 06/54] docs(examples): update example of using the SDK with streaming endpoint --- examples/stream-stt/index.ts | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/examples/stream-stt/index.ts b/examples/stream-stt/index.ts index e00c1ce5..d062dba0 100644 --- a/examples/stream-stt/index.ts +++ b/examples/stream-stt/index.ts @@ -4,17 +4,18 @@ import { StreamingRecognitionRequest, } from '@yandex-cloud/nodejs-sdk/dist/generated/yandex/cloud/ai/stt/v2/stt_service'; import * as fs from 'fs'; +import * as stream from 'stream'; import * as wav from 'wav'; -import { Format } from 'wav'; import { getEnv } from '../utils/get-env'; import { log } from '../utils/logger'; const file = fs.createReadStream('test.wav'); const reader = new wav.Reader(); +const data = new stream.PassThrough(); -const formatPromise = new Promise((resolve) => { +const formatPromise = new Promise((resolve) => { // the "format" event gets emitted at the end of the WAVE header - reader.on('format', (format: Format) => { + reader.on('format', (format: wav.Format) => { // pass the format object resolve(format); }); @@ -22,6 +23,7 @@ const formatPromise = new Promise((resolve) => { // pipe the WAVE file to the Reader instance file.pipe(reader); +reader.pipe(data); (async () => { const authToken = getEnv('YC_OAUTH_TOKEN'); @@ -32,8 +34,6 @@ file.pipe(reader); async function* createRequest(): AsyncIterable { const format = await formatPromise; - log(JSON.stringify(format, null, 2)); - // First message of the stream should be the config message yield StreamingRecognitionRequest.fromPartial({ config: { specification: { @@ -44,8 +44,7 @@ file.pipe(reader); folderId, }, }); - // Now we can send the data - for await (const chunk of file) { + for await (const chunk of data) { yield StreamingRecognitionRequest.fromPartial({ audioContent: chunk, });
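For context on the change above: the example now lets wav.Reader consume the raw file, strip the WAVE header (emitting a 'format' event), and pipe only the decoded audio frames into a PassThrough that the request generator reads from, instead of re-reading the raw file stream. Below is a minimal standalone sketch of that pattern; it assumes the same wav and stream modules used by the example, while the file name audio.wav and the console output are illustrative placeholders rather than part of the SDK examples.

```ts
// Minimal sketch of the wav.Reader -> PassThrough pattern (placeholder file name).
import * as fs from 'fs';
import * as stream from 'stream';
import * as wav from 'wav';

const file = fs.createReadStream('audio.wav'); // raw WAVE file (header + audio frames)
const reader = new wav.Reader();               // parses and strips the WAVE header
const audio = new stream.PassThrough();        // receives decoded audio frames only

// 'format' fires once the WAVE header has been parsed.
const formatPromise = new Promise<wav.Format>((resolve) => {
    reader.on('format', resolve);
});

file.pipe(reader);  // raw file -> header parser
reader.pipe(audio); // decoded audio -> buffer consumed below

(async () => {
    const format = await formatPromise;
    // the format object carries the sample rate needed for the recognition config
    console.log(`sampleRate=${format.sampleRate} channels=${format.channels}`);
    for await (const chunk of audio) {
        // each chunk is a Buffer of audio data, ready to be wrapped into a streaming request
        console.log(`audio chunk of ${(chunk as Buffer).length} bytes`);
    }
})();
```

Reading chunks from the PassThrough rather than from the file stream keeps WAVE header bytes out of the audio payload and avoids competing with the pipe into the parser for the same data.
From 2636b074f34881b63d078ea98d6c8f5b3befa8f9 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 28 Feb 2022 15:28:04 +0300 Subject: [PATCH 07/54] docs(examples): update sdk version in examples --- examples/package-lock.json | 64 ++++++++++++++++++------------ examples/package.json | 2 +- examples/stream-stt/index.ts | 4 ++-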
3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/examples/package-lock.json b/examples/package-lock.json index c22c7532..3af69824 100644 --- a/examples/package-lock.json +++ b/examples/package-lock.json @@ -9,7 +9,7 @@ "version": "1.0.0", "license": "ISC", "dependencies": { - "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.1", + "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.2", "wav": "^1.0.2" }, "devDependencies": { @@ -67,9 +67,9 @@ } }, "node_modules/@grpc/grpc-js": { - "version": "1.4.4", + "version": "1.5.5", "resolved": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-RzhxaO5zpygPvFLKiWu24lb3lYlEdQBeZJAYlEgoB+OaqIZLFDeG/833v+lInxVYvPiNB/GMCfQrhktzsereiw==", + "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", "license": "Apache-2.0", "dependencies": { "@grpc/proto-loader": "^0.6.4", @@ -176,9 +176,9 @@ } }, "node_modules/@yandex-cloud/nodejs-sdk": { - "version": "2.0.0-beta.1", - "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.1.tgz", - "integrity": "sha512-1nAtsNaCwefM7BP9GI2144aCsGdhQBxVzxZR13N6+e66A1uJH/nxJpHDXM1nB61jveOmDZUH8/JOyjGVLOM3qw==", + "version": "2.0.0-beta.2", + "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.2.tgz", + "integrity": "sha512-38malOtD1lu8CWEb3CwwBEpaicK8g9GADp7Yo8YVnhtgZjkx+4aWfz7kuLtC8Cz5lUF8woXwCmHdmVweLO81Rg==", "dependencies": { "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", "axios": "0.24.0", @@ -187,8 +187,8 @@ "log4js": "6.3.0", "long": "5.2.0", "luxon": "2.2.0", - "nice-grpc": "1.0.4", - "nice-grpc-client-middleware-deadline": "1.0.4", + "nice-grpc": "1.0.6", + "nice-grpc-client-middleware-deadline": "1.0.6", "protobufjs": "6.8.8", "utility-types": "3.10.0" }, @@ -574,22 +574,22 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/nice-grpc": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.4.tgz", - "integrity": "sha512-/1fAKywTdwHzVxt1Ski6120lx6S++RpGjXp7y0OUTZze4wHrwgC64xuuRTT6COz5BcX+Pch7gTc2m5fz7+M4nA==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.6.tgz", + "integrity": "sha512-cRImN+SpnPaTAqWSbuU5ixq/wo1Jr1QOv0IZjmcb40XNU0og4JEyt7VCtTM7SAbeLAjdFxd65wiIid696kVTJA==", "dependencies": { - "@grpc/grpc-js": "^1.2.6", + "@grpc/grpc-js": "^1.5.1", "abort-controller-x": "^0.2.4", - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^1.2.1" } }, "node_modules/nice-grpc-client-middleware-deadline": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.4.tgz", - "integrity": "sha512-IYLEzWkLI0ij41WVDLBjBJohmlh2cI+2ttMDawK8h7G209vrAndEJ4iiN9gQUqtguVzq4S3e8BzQgJ26hBMQtw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.6.tgz", + "integrity": "sha512-AokugSveg+2IPohuLbGR5OITgh3W4yZvAmLhuqistjwSRLchzQI4CwQEL1Tj4R0wscreSFoiHkXyG4qtKygOug==", "dependencies": { - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^2.0.0" } }, @@ -862,7 +862,7 @@ "dependencies": { "@grpc/grpc-js": { "version": 
"https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-RzhxaO5zpygPvFLKiWu24lb3lYlEdQBeZJAYlEgoB+OaqIZLFDeG/833v+lInxVYvPiNB/GMCfQrhktzsereiw==", + "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", "requires": { "@grpc/proto-loader": "^0.6.4", "@types/node": ">=12.12.47" @@ -961,9 +961,9 @@ } }, "@yandex-cloud/nodejs-sdk": { - "version": "2.0.0-beta.1", - "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.1.tgz", - "integrity": "sha512-1nAtsNaCwefM7BP9GI2144aCsGdhQBxVzxZR13N6+e66A1uJH/nxJpHDXM1nB61jveOmDZUH8/JOyjGVLOM3qw==", + "version": "2.0.0-beta.2", + "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.2.tgz", + "integrity": "sha512-38malOtD1lu8CWEb3CwwBEpaicK8g9GADp7Yo8YVnhtgZjkx+4aWfz7kuLtC8Cz5lUF8woXwCmHdmVweLO81Rg==", "requires": { "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", "axios": "0.24.0", @@ -972,8 +972,8 @@ "log4js": "6.3.0", "long": "5.2.0", "luxon": "2.2.0", - "nice-grpc": "1.0.4", - "nice-grpc-client-middleware-deadline": "1.0.4", + "nice-grpc": "1.0.6", + "nice-grpc-client-middleware-deadline": "1.0.6", "protobufjs": "6.8.8", "utility-types": "3.10.0" }, @@ -1298,22 +1298,22 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "nice-grpc": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.4.tgz", - "integrity": "sha512-/1fAKywTdwHzVxt1Ski6120lx6S++RpGjXp7y0OUTZze4wHrwgC64xuuRTT6COz5BcX+Pch7gTc2m5fz7+M4nA==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.6.tgz", + "integrity": "sha512-cRImN+SpnPaTAqWSbuU5ixq/wo1Jr1QOv0IZjmcb40XNU0og4JEyt7VCtTM7SAbeLAjdFxd65wiIid696kVTJA==", "requires": { - "@grpc/grpc-js": "^1.2.6", + "@grpc/grpc-js": "^1.5.1", "abort-controller-x": "^0.2.4", - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^1.2.1" } }, "nice-grpc-client-middleware-deadline": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.4.tgz", - "integrity": "sha512-IYLEzWkLI0ij41WVDLBjBJohmlh2cI+2ttMDawK8h7G209vrAndEJ4iiN9gQUqtguVzq4S3e8BzQgJ26hBMQtw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.6.tgz", + "integrity": "sha512-AokugSveg+2IPohuLbGR5OITgh3W4yZvAmLhuqistjwSRLchzQI4CwQEL1Tj4R0wscreSFoiHkXyG4qtKygOug==", "requires": { - "nice-grpc-common": "^1.0.3", + "nice-grpc-common": "^1.0.4", "node-abort-controller": "^2.0.0" }, "dependencies": { diff --git a/examples/package.json b/examples/package.json index 3d250922..92b6dbbb 100644 --- a/examples/package.json +++ b/examples/package.json @@ -10,7 +10,7 @@ "author": "", "license": "ISC", "dependencies": { - "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.1", + "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.2", "wav": "^1.0.2" }, "devDependencies": { diff --git a/examples/stream-stt/index.ts b/examples/stream-stt/index.ts index d062dba0..5a1058e4 100644 --- a/examples/stream-stt/index.ts +++ b/examples/stream-stt/index.ts @@ -4,12 +4,14 @@ import { StreamingRecognitionRequest, } from '@yandex-cloud/nodejs-sdk/dist/generated/yandex/cloud/ai/stt/v2/stt_service'; import * as fs from 'fs'; +import * 
as path from 'path'; import * as stream from 'stream'; import * as wav from 'wav'; import { getEnv } from '../utils/get-env'; import { log } from '../utils/logger'; -const file = fs.createReadStream('test.wav'); +// eslint-disable-next-line unicorn/prefer-module +const file = fs.createReadStream(path.join(__dirname, 'test.wav')); const reader = new wav.Reader(); const data = new stream.PassThrough(); From b9680856bd75ee4b6801e880fa18fe0a6b665bb3 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 28 Feb 2022 16:15:01 +0300 Subject: [PATCH 08/54] docs(examples): added instruction to `README.md` on how to run examples --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index fb380897..e9acd7be 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,13 @@ const response = await cloudService.list(ListCloudsRequest.fromPartial({ Check [examples](./examples) directory for more examples. +To run example scripts, you should execute the following commands: +```bash +cd examples +npm i +YC_OAUTH_TOKEN=... YC_FOLDER_ID=... npm run start path/to/example.ts +``` + ## Services * AI Translate; From 215fab13cbc7ead95c67f54c1e9f07ea96b69395 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 1 Apr 2022 13:19:54 +0300 Subject: [PATCH 09/54] feat: update @grpc/grpc-js --- package-lock.json | 14 +++++++------- package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/package-lock.json b/package-lock.json index f9bc1ac3..ae8acfa8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,7 +9,7 @@ "version": "2.0.0-alpha.4", "license": "MIT", "dependencies": { - "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", + "@grpc/grpc-js": "1.6.0", "axios": "0.24.0", "jsonwebtoken": "8.5.1", "lodash": "4.17.21", @@ -1154,10 +1154,9 @@ } }, "node_modules/@grpc/grpc-js": { - "version": "1.5.5", - "resolved": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", - "license": "Apache-2.0", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.6.0.tgz", + "integrity": "sha512-KwNibKGx1qmAwsrYu75FhUo3+m6GMJoBfdnYZte9YQ2EM3hZ5Ez+8+Q+FAMONtfU0XJGUkGK5S+q4CXSjx5Ahw==", "dependencies": { "@grpc/proto-loader": "^0.6.4", "@types/node": ">=12.12.47" @@ -13644,8 +13643,9 @@ } }, "@grpc/grpc-js": { - "version": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.6.0.tgz", + "integrity": "sha512-KwNibKGx1qmAwsrYu75FhUo3+m6GMJoBfdnYZte9YQ2EM3hZ5Ez+8+Q+FAMONtfU0XJGUkGK5S+q4CXSjx5Ahw==", "requires": { "@grpc/proto-loader": "^0.6.4", "@types/node": ">=12.12.47" diff --git a/package.json b/package.json index 9f151495..2b801f3e 100644 --- a/package.json +++ b/package.json @@ -22,7 +22,7 @@ ], "homepage": "https://github.com/yandex-cloud/nodejs-sdk#readme", "dependencies": { - "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", + "@grpc/grpc-js": "1.6.0", "axios": "0.24.0", "jsonwebtoken": "8.5.1", "lodash": "4.17.21", From 835381c27596f6c3b866b35162d7bab2da94ce6b Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 1 
Apr 2022 13:57:38 +0300 Subject: [PATCH 10/54] feat: updated services definitions --- cloudapi | 2 +- scripts/services.ts | 6 + src/generated/yandex/cloud/access/access.ts | 2 +- src/generated/yandex/cloud/ai/index.ts | 5 +- .../yandex/cloud/ai/stt/v2/stt_service.ts | 23 + src/generated/yandex/cloud/ai/stt/v3/stt.ts | 2814 +++++++++++ .../yandex/cloud/ai/stt/v3/stt_service.ts | 73 + .../ai/translate/v2/translation_service.ts | 15 + src/generated/yandex/cloud/ai/tts/v3/tts.ts | 38 +- .../yandex/cloud/ai/vision/v2/image.ts | 193 + .../cloud/ai/vision/v2/image_classifier.ts | 541 +++ .../ai/vision/v2/image_classifier_service.ts | 75 + .../cloud/apploadbalancer/v1/backend_group.ts | 301 +- .../v1/backend_group_service.ts | 49 +- .../cloud/apploadbalancer/v1/load_balancer.ts | 36 +- .../v1/load_balancer_service.ts | 12 +- .../cloud/apploadbalancer/v1/virtual_host.ts | 6 +- src/generated/yandex/cloud/cdn/index.ts | 2 + .../yandex/cloud/cdn/v1/cache_service.ts | 18 +- .../cloud/cdn/v1/origin_group_service.ts | 21 +- .../yandex/cloud/cdn/v1/origin_service.ts | 21 +- src/generated/yandex/cloud/cdn/v1/raw_logs.ts | 193 + .../yandex/cloud/cdn/v1/raw_logs_service.ts | 1039 ++++ src/generated/yandex/cloud/cdn/v1/resource.ts | 18 +- .../yandex/cloud/cdn/v1/resource_service.ts | 57 +- src/generated/yandex/cloud/compute/v1/disk.ts | 2 +- .../yandex/cloud/compute/v1/disk_service.ts | 224 + .../cloud/compute/v1/filesystem_service.ts | 15 + .../yandex/cloud/compute/v1/image.ts | 2 +- .../yandex/cloud/compute/v1/image_service.ts | 2 +- .../yandex/cloud/compute/v1/instance.ts | 111 + .../cloud/compute/v1/instance_service.ts | 369 ++ .../instancegroup/instance_group_service.ts | 35 + .../yandex/cloud/compute/v1/snapshot.ts | 2 +- .../cloud/containerregistry/v1/registry.ts | 2 +- .../dataproc/manager/v1/manager_service.ts | 8 +- .../yandex/cloud/dataproc/v1/cluster.ts | 145 + .../yandex/cloud/dataproc/v1/common.ts | 8 +- .../yandex/cloud/dataproc/v1/subcluster.ts | 4 +- .../cloud/datatransfer/v1/endpoint/common.ts | 44 + .../cloud/datatransfer/v1/endpoint/mysql.ts | 46 +- .../datatransfer/v1/endpoint/postgres.ts | 34 +- .../cloud/datatransfer/v1/endpoint_service.ts | 2 +- .../cloud/datatransfer/v1/transfer_service.ts | 2 +- .../yandex/cloud/iam/v1/iam_token_service.ts | 2 +- .../yandex/cloud/iam/v1/user_account.ts | 8 +- .../cloud/iam/v1/user_account_service.ts | 2 +- .../metering/image_product_usage_service.ts | 2 +- .../marketplace/v1/metering/usage_record.ts | 4 +- .../yandex/cloud/mdb/clickhouse/v1/cluster.ts | 41 +- .../mdb/clickhouse/v1/cluster_service.ts | 346 ++ .../cloud/mdb/elasticsearch/v1/backup.ts | 280 ++ .../mdb/elasticsearch/v1/backup_service.ts | 429 ++ .../mdb/elasticsearch/v1/cluster_service.ts | 1160 ++++- .../cloud/mdb/elasticsearch/v1/extension.ts | 281 ++ .../mdb/elasticsearch/v1/extension_service.ts | 1127 +++++ .../yandex/cloud/mdb/greenplum/v1/backup.ts | 232 + .../cloud/mdb/greenplum/v1/backup_service.ts | 429 ++ .../yandex/cloud/mdb/greenplum/v1/cluster.ts | 490 +- .../cloud/mdb/greenplum/v1/cluster_service.ts | 1913 +++++++- .../yandex/cloud/mdb/greenplum/v1/config.ts | 1318 ++--- .../yandex/cloud/mdb/greenplum/v1/host.ts | 24 +- .../cloud/mdb/greenplum/v1/maintenance.ts | 11 +- .../cloud/mdb/greenplum/v1/resource_preset.ts | 291 ++ .../greenplum/v1/resource_preset_service.ts | 475 ++ src/generated/yandex/cloud/mdb/index.ts | 12 + .../yandex/cloud/mdb/kafka/v1/cluster.ts | 87 +- .../yandex/cloud/mdb/mongodb/v1/cluster.ts | 1593 ++++++- .../cloud/mdb/mongodb/v1/cluster_service.ts | 
1636 ++++++- .../v1/config/mongodb4_4_enterprise.ts | 2902 +++++++++++ .../v1/config/mongodb5_0_enterprise.ts | 2927 ++++++++++++ .../yandex/cloud/mdb/mysql/v1/backup.ts | 11 +- .../cloud/mdb/mysql/v1/backup_service.ts | 66 +- .../yandex/cloud/mdb/mysql/v1/cluster.ts | 205 +- .../cloud/mdb/mysql/v1/cluster_service.ts | 672 ++- .../cloud/mdb/mysql/v1/config/mysql5_7.ts | 350 +- .../cloud/mdb/mysql/v1/config/mysql8_0.ts | 321 +- .../yandex/cloud/mdb/mysql/v1/database.ts | 9 +- .../cloud/mdb/mysql/v1/database_service.ts | 102 +- .../yandex/cloud/mdb/mysql/v1/maintenance.ts | 2 +- .../cloud/mdb/mysql/v1/resource_preset.ts | 9 +- .../mdb/mysql/v1/resource_preset_service.ts | 49 +- .../yandex/cloud/mdb/mysql/v1/user.ts | 50 +- .../yandex/cloud/mdb/mysql/v1/user_service.ts | 163 +- .../yandex/cloud/mdb/mysql/v1alpha/cluster.ts | 2 +- .../mdb/mysql/v1alpha/cluster_service.ts | 2 +- .../cloud/mdb/mysql/v1alpha/database.ts | 5 +- .../yandex/cloud/mdb/mysql/v1alpha/user.ts | 9 +- .../cloud/mdb/mysql/v1alpha/user_service.ts | 77 +- .../yandex/cloud/mdb/postgresql/v1/cluster.ts | 78 +- .../mdb/postgresql/v1/cluster_service.ts | 60 +- .../cloud/mdb/postgresql/v1/config/host14.ts | 2028 ++++++++ .../mdb/postgresql/v1/config/postgresql10.ts | 117 + .../postgresql/v1/config/postgresql10_1c.ts | 117 + .../mdb/postgresql/v1/config/postgresql11.ts | 117 + .../postgresql/v1/config/postgresql11_1c.ts | 117 + .../mdb/postgresql/v1/config/postgresql12.ts | 117 + .../postgresql/v1/config/postgresql12_1c.ts | 117 + .../mdb/postgresql/v1/config/postgresql13.ts | 117 + .../mdb/postgresql/v1/config/postgresql14.ts | 4230 +++++++++++++++++ .../yandex/cloud/mdb/redis/v1/cluster.ts | 103 +- .../cloud/mdb/redis/v1/cluster_service.ts | 478 +- .../cloud/mdb/redis/v1/config/redis5_0.ts | 192 + .../cloud/mdb/redis/v1/config/redis6_0.ts | 192 + .../cloud/mdb/redis/v1/config/redis6_2.ts | 192 + .../yandex/cloud/mdb/sqlserver/v1/cluster.ts | 36 +- .../cloud/mdb/sqlserver/v1/cluster_service.ts | 55 + .../cloud/operation/operation_service.ts | 18 +- .../organizationmanager/v1/user_account.ts | 8 +- .../organizationmanager/v1/user_service.ts | 2 +- .../serverless/apigateway/v1/apigateway.ts | 115 + .../apigateway/v1/apigateway_service.ts | 51 +- .../serverless/containers/v1/container.ts | 228 + .../containers/v1/container_service.ts | 43 + .../cloud/serverless/functions/v1/function.ts | 129 + .../functions/v1/function_service.ts | 21 + src/generated/yandex/cloud/service_clients.ts | 8 +- src/service-endpoints.ts | 10 +- 118 files changed, 33455 insertions(+), 2384 deletions(-) create mode 100644 src/generated/yandex/cloud/ai/stt/v3/stt.ts create mode 100644 src/generated/yandex/cloud/ai/stt/v3/stt_service.ts create mode 100644 src/generated/yandex/cloud/ai/vision/v2/image.ts create mode 100644 src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts create mode 100644 src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts create mode 100644 src/generated/yandex/cloud/cdn/v1/raw_logs.ts create mode 100644 src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts create mode 100644 src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts create mode 100644 src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts create mode 100644 src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts create mode 100644 src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts create mode 100644 src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts create mode 100644 
src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts create mode 100644 src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts create mode 100644 src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts create mode 100644 src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts create mode 100644 src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts diff --git a/cloudapi b/cloudapi index 83dafd6b..3cd72656 160000 --- a/cloudapi +++ b/cloudapi @@ -1 +1 @@ -Subproject commit 83dafd6b465cfa321dfd492f840ed480f7269493 +Subproject commit 3cd726562074d55546973cb075bc03754fdedc99 diff --git a/scripts/services.ts b/scripts/services.ts index 5b8faca6..a0fa8594 100644 --- a/scripts/services.ts +++ b/scripts/services.ts @@ -7,6 +7,7 @@ export const servicesConfig: ServicesConfig = { translate_translation_service: { importClassName: 'TranslationServiceClient' }, tts_service: { importClassName: 'SynthesizerClient' }, vision_service: { importClassName: 'VisionServiceClient' }, + vision_image_classifier_service: { importClassName: 'ImageClassifierServiceClient' }, }, apploadbalancer: { backend_group_service: { importClassName: 'BackendGroupServiceClient' }, @@ -28,6 +29,7 @@ export const servicesConfig: ServicesConfig = { origin_service: { importClassName: 'OriginServiceClient' }, provider_service: { importClassName: 'ProviderServiceClient' }, resource_service: { importClassName: 'ResourceServiceClient' }, + raw_logs_service: { importClassName: 'RawLogsServiceClient' }, }, certificatemanager: { certificate_content_service: { importClassName: 'CertificateContentServiceClient' }, @@ -128,10 +130,14 @@ export const servicesConfig: ServicesConfig = { clickhouse_user_service: { importClassName: 'UserServiceClient', exportClassName: 'ClickHouseUserServiceClient' }, clickhouse_versions_service: { importClassName: 'VersionsServiceClient' }, elasticsearch_auth_service: { importClassName: 'AuthServiceClient' }, + elasticsearch_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'ElasticBackupServiceClient' }, + elasticsearch_extension_service: { importClassName: 'ExtensionServiceClient', exportClassName: 'ElasticExtensionServiceClient' }, elasticsearch_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'ElasticClusterServiceClient' }, elasticsearch_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'ElasticResourcePresetServiceClient' }, elasticsearch_user_service: { importClassName: 'UserServiceClient', exportClassName: 'ElasticUserServiceClient' }, greenplum_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'GreenplumClusterServiceClient' }, + greenplum_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'GreenplumBackupServiceClient' }, + greenplum_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'GreenplumResourcePresetServiceClient' }, kafka_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'KafkaClusterServiceClient' }, kafka_connector_service: { importClassName: 'ConnectorServiceClient' }, kafka_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'KafkaResourcePresetServiceClient' }, diff --git 
a/src/generated/yandex/cloud/access/access.ts b/src/generated/yandex/cloud/access/access.ts index d9026df9..35939c1c 100644 --- a/src/generated/yandex/cloud/access/access.ts +++ b/src/generated/yandex/cloud/access/access.ts @@ -63,7 +63,7 @@ export interface Subject { * Type of the subject. * * It can contain one of the following values: - * * `userAccount`: An account on Yandex or Yandex.Connect, added to Yandex.Cloud. + * * `userAccount`: An account on Yandex or Yandex Connect, added to Yandex Cloud. * * `serviceAccount`: A service account. This type represents the [yandex.cloud.iam.v1.ServiceAccount] resource. * * `federatedUser`: A federated account. This type represents a user from an identity federation, like Active Directory. * * `system`: System group. This type represents several accounts with a common system identifier. diff --git a/src/generated/yandex/cloud/ai/index.ts b/src/generated/yandex/cloud/ai/index.ts index ea08c21a..972cfc79 100644 --- a/src/generated/yandex/cloud/ai/index.ts +++ b/src/generated/yandex/cloud/ai/index.ts @@ -8,4 +8,7 @@ export * as vision_face_detection from './vision/v1/face_detection' export * as vision_image_copy_search from './vision/v1/image_copy_search' export * as vision_primitives from './vision/v1/primitives' export * as vision_text_detection from './vision/v1/text_detection' -export * as vision_service from './vision/v1/vision_service' \ No newline at end of file +export * as vision_service from './vision/v1/vision_service' +export * as vision_image from './vision/v2/image' +export * as vision_image_classifier from './vision/v2/image_classifier' +export * as vision_image_classifier_service from './vision/v2/image_classifier_service' diff --git a/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts b/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts index 17545d6a..a53e28b1 100644 --- a/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts +++ b/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts @@ -75,6 +75,8 @@ export interface RecognitionSpec { audioChannelCount: number; /** This mark allows disable normalization text */ rawResults: boolean; + /** Rewrite text in literature style (default: false) */ + literatureText: boolean; } export enum RecognitionSpec_AudioEncoding { @@ -82,6 +84,8 @@ export enum RecognitionSpec_AudioEncoding { /** LINEAR16_PCM - 16-bit signed little-endian (Linear PCM) */ LINEAR16_PCM = 1, OGG_OPUS = 2, + /** MP3 - transcription only */ + MP3 = 3, UNRECOGNIZED = -1, } @@ -98,6 +102,9 @@ export function recognitionSpec_AudioEncodingFromJSON( case 2: case "OGG_OPUS": return RecognitionSpec_AudioEncoding.OGG_OPUS; + case 3: + case "MP3": + return RecognitionSpec_AudioEncoding.MP3; case -1: case "UNRECOGNIZED": default: @@ -115,6 +122,8 @@ export function recognitionSpec_AudioEncodingToJSON( return "LINEAR16_PCM"; case RecognitionSpec_AudioEncoding.OGG_OPUS: return "OGG_OPUS"; + case RecognitionSpec_AudioEncoding.MP3: + return "MP3"; default: return "UNKNOWN"; } @@ -686,6 +695,7 @@ const baseRecognitionSpec: object = { singleUtterance: false, audioChannelCount: 0, rawResults: false, + literatureText: false, }; export const RecognitionSpec = { @@ -722,6 +732,9 @@ export const RecognitionSpec = { if (message.rawResults === true) { writer.uint32(80).bool(message.rawResults); } + if (message.literatureText === true) { + writer.uint32(88).bool(message.literatureText); + } return writer; }, @@ -759,6 +772,9 @@ export const RecognitionSpec = { case 10: message.rawResults = reader.bool(); break; + case 11: + message.literatureText 
= reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -806,6 +822,10 @@ export const RecognitionSpec = { object.rawResults !== undefined && object.rawResults !== null ? Boolean(object.rawResults) : false; + message.literatureText = + object.literatureText !== undefined && object.literatureText !== null + ? Boolean(object.literatureText) + : false; return message; }, @@ -829,6 +849,8 @@ export const RecognitionSpec = { message.audioChannelCount !== undefined && (obj.audioChannelCount = Math.round(message.audioChannelCount)); message.rawResults !== undefined && (obj.rawResults = message.rawResults); + message.literatureText !== undefined && + (obj.literatureText = message.literatureText); return obj; }, @@ -845,6 +867,7 @@ export const RecognitionSpec = { message.singleUtterance = object.singleUtterance ?? false; message.audioChannelCount = object.audioChannelCount ?? 0; message.rawResults = object.rawResults ?? false; + message.literatureText = object.literatureText ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt.ts b/src/generated/yandex/cloud/ai/stt/v3/stt.ts new file mode 100644 index 00000000..c941aa89 --- /dev/null +++ b/src/generated/yandex/cloud/ai/stt/v3/stt.ts @@ -0,0 +1,2814 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "speechkit.stt.v3"; + +export enum CodeType { + CODE_TYPE_UNSPECIFIED = 0, + /** WORKING - all good */ + WORKING = 1, + /** WARNING - for example, if speech is sent not in real time. or unknown context (and we've made fallback) */ + WARNING = 2, + /** CLOSED - after session was closed */ + CLOSED = 3, + UNRECOGNIZED = -1, +} + +export function codeTypeFromJSON(object: any): CodeType { + switch (object) { + case 0: + case "CODE_TYPE_UNSPECIFIED": + return CodeType.CODE_TYPE_UNSPECIFIED; + case 1: + case "WORKING": + return CodeType.WORKING; + case 2: + case "WARNING": + return CodeType.WARNING; + case 3: + case "CLOSED": + return CodeType.CLOSED; + case -1: + case "UNRECOGNIZED": + default: + return CodeType.UNRECOGNIZED; + } +} + +export function codeTypeToJSON(object: CodeType): string { + switch (object) { + case CodeType.CODE_TYPE_UNSPECIFIED: + return "CODE_TYPE_UNSPECIFIED"; + case CodeType.WORKING: + return "WORKING"; + case CodeType.WARNING: + return "WARNING"; + case CodeType.CLOSED: + return "CLOSED"; + default: + return "UNKNOWN"; + } +} + +/** options */ +export interface TextNormalizationOptions { + $type: "speechkit.stt.v3.TextNormalizationOptions"; + textNormalization: TextNormalizationOptions_TextNormalization; + /** Filter profanity (default: false) */ + profanityFilter: boolean; + /** Rewrite text in literature style (default: false) */ + literatureText: boolean; +} + +/** Normalization */ +export enum TextNormalizationOptions_TextNormalization { + TEXT_NORMALIZATION_UNSPECIFIED = 0, + /** TEXT_NORMALIZATION_ENABLED - Enable normalization */ + TEXT_NORMALIZATION_ENABLED = 1, + /** TEXT_NORMALIZATION_DISABLED - Disable normalization */ + TEXT_NORMALIZATION_DISABLED = 2, + UNRECOGNIZED = -1, +} + +export function textNormalizationOptions_TextNormalizationFromJSON( + object: any +): TextNormalizationOptions_TextNormalization { + switch (object) { + case 0: + case "TEXT_NORMALIZATION_UNSPECIFIED": + return TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_UNSPECIFIED; + case 1: + case "TEXT_NORMALIZATION_ENABLED": + return 
TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_ENABLED; + case 2: + case "TEXT_NORMALIZATION_DISABLED": + return TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_DISABLED; + case -1: + case "UNRECOGNIZED": + default: + return TextNormalizationOptions_TextNormalization.UNRECOGNIZED; + } +} + +export function textNormalizationOptions_TextNormalizationToJSON( + object: TextNormalizationOptions_TextNormalization +): string { + switch (object) { + case TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_UNSPECIFIED: + return "TEXT_NORMALIZATION_UNSPECIFIED"; + case TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_ENABLED: + return "TEXT_NORMALIZATION_ENABLED"; + case TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_DISABLED: + return "TEXT_NORMALIZATION_DISABLED"; + default: + return "UNKNOWN"; + } +} + +export interface DefaultEouClassifier { + $type: "speechkit.stt.v3.DefaultEouClassifier"; + /** EOU sensitivity. Currently two levels, faster with more error and more conservative (our default) */ + type: DefaultEouClassifier_EouSensitivity; + /** hint for max pause between words. Our EoU detector could use this information to distinguish between end of utterance and slow speech (like one two three, etc) */ + maxPauseBetweenWordsHintMs: number; +} + +export enum DefaultEouClassifier_EouSensitivity { + EOU_SENSITIVITY_UNSPECIFIED = 0, + DEFAULT = 1, + HIGH = 2, + UNRECOGNIZED = -1, +} + +export function defaultEouClassifier_EouSensitivityFromJSON( + object: any +): DefaultEouClassifier_EouSensitivity { + switch (object) { + case 0: + case "EOU_SENSITIVITY_UNSPECIFIED": + return DefaultEouClassifier_EouSensitivity.EOU_SENSITIVITY_UNSPECIFIED; + case 1: + case "DEFAULT": + return DefaultEouClassifier_EouSensitivity.DEFAULT; + case 2: + case "HIGH": + return DefaultEouClassifier_EouSensitivity.HIGH; + case -1: + case "UNRECOGNIZED": + default: + return DefaultEouClassifier_EouSensitivity.UNRECOGNIZED; + } +} + +export function defaultEouClassifier_EouSensitivityToJSON( + object: DefaultEouClassifier_EouSensitivity +): string { + switch (object) { + case DefaultEouClassifier_EouSensitivity.EOU_SENSITIVITY_UNSPECIFIED: + return "EOU_SENSITIVITY_UNSPECIFIED"; + case DefaultEouClassifier_EouSensitivity.DEFAULT: + return "DEFAULT"; + case DefaultEouClassifier_EouSensitivity.HIGH: + return "HIGH"; + default: + return "UNKNOWN"; + } +} + +/** use EOU provided by user */ +export interface ExternalEouClassifier { + $type: "speechkit.stt.v3.ExternalEouClassifier"; +} + +export interface EouClassifierOptions { + $type: "speechkit.stt.v3.EouClassifierOptions"; + /** EOU classifier provided by SpeechKit. Default */ + defaultClassifier?: DefaultEouClassifier | undefined; + /** EoU is enforced by external messages from user */ + externalClassifier?: ExternalEouClassifier | undefined; +} + +/** RAW Audio format spec (no container to infer type). used in AudioFormat options */ +export interface RawAudio { + $type: "speechkit.stt.v3.RawAudio"; + /** type of audio encoding */ + audioEncoding: RawAudio_AudioEncoding; + /** PCM sample rate */ + sampleRateHertz: number; + /** PCM channel count. 
Currently only single channel audio is supported in real-time recognition */ + audioChannelCount: number; +} + +export enum RawAudio_AudioEncoding { + AUDIO_ENCODING_UNSPECIFIED = 0, + LINEAR16_PCM = 1, + UNRECOGNIZED = -1, +} + +export function rawAudio_AudioEncodingFromJSON( + object: any +): RawAudio_AudioEncoding { + switch (object) { + case 0: + case "AUDIO_ENCODING_UNSPECIFIED": + return RawAudio_AudioEncoding.AUDIO_ENCODING_UNSPECIFIED; + case 1: + case "LINEAR16_PCM": + return RawAudio_AudioEncoding.LINEAR16_PCM; + case -1: + case "UNRECOGNIZED": + default: + return RawAudio_AudioEncoding.UNRECOGNIZED; + } +} + +export function rawAudio_AudioEncodingToJSON( + object: RawAudio_AudioEncoding +): string { + switch (object) { + case RawAudio_AudioEncoding.AUDIO_ENCODING_UNSPECIFIED: + return "AUDIO_ENCODING_UNSPECIFIED"; + case RawAudio_AudioEncoding.LINEAR16_PCM: + return "LINEAR16_PCM"; + default: + return "UNKNOWN"; + } +} + +/** Audio with fixed type in container. used in AudioFormat options */ +export interface ContainerAudio { + $type: "speechkit.stt.v3.ContainerAudio"; + /** type of audio container */ + containerAudioType: ContainerAudio_ContainerAudioType; +} + +export enum ContainerAudio_ContainerAudioType { + CONTAINER_AUDIO_TYPE_UNSPECIFIED = 0, + WAV = 1, + OGG_OPUS = 2, + MP3 = 3, + UNRECOGNIZED = -1, +} + +export function containerAudio_ContainerAudioTypeFromJSON( + object: any +): ContainerAudio_ContainerAudioType { + switch (object) { + case 0: + case "CONTAINER_AUDIO_TYPE_UNSPECIFIED": + return ContainerAudio_ContainerAudioType.CONTAINER_AUDIO_TYPE_UNSPECIFIED; + case 1: + case "WAV": + return ContainerAudio_ContainerAudioType.WAV; + case 2: + case "OGG_OPUS": + return ContainerAudio_ContainerAudioType.OGG_OPUS; + case 3: + case "MP3": + return ContainerAudio_ContainerAudioType.MP3; + case -1: + case "UNRECOGNIZED": + default: + return ContainerAudio_ContainerAudioType.UNRECOGNIZED; + } +} + +export function containerAudio_ContainerAudioTypeToJSON( + object: ContainerAudio_ContainerAudioType +): string { + switch (object) { + case ContainerAudio_ContainerAudioType.CONTAINER_AUDIO_TYPE_UNSPECIFIED: + return "CONTAINER_AUDIO_TYPE_UNSPECIFIED"; + case ContainerAudio_ContainerAudioType.WAV: + return "WAV"; + case ContainerAudio_ContainerAudioType.OGG_OPUS: + return "OGG_OPUS"; + case ContainerAudio_ContainerAudioType.MP3: + return "MP3"; + default: + return "UNKNOWN"; + } +} + +/** audio format options */ +export interface AudioFormatOptions { + $type: "speechkit.stt.v3.AudioFormatOptions"; + /** audio without container */ + rawAudio?: RawAudio | undefined; + /** audio is wrapped in container */ + containerAudio?: ContainerAudio | undefined; +} + +export interface LanguageRestrictionOptions { + $type: "speechkit.stt.v3.LanguageRestrictionOptions"; + restrictionType: LanguageRestrictionOptions_LanguageRestrictionType; + languageCode: string[]; +} + +export enum LanguageRestrictionOptions_LanguageRestrictionType { + LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED = 0, + WHITELIST = 1, + BLACKLIST = 2, + UNRECOGNIZED = -1, +} + +export function languageRestrictionOptions_LanguageRestrictionTypeFromJSON( + object: any +): LanguageRestrictionOptions_LanguageRestrictionType { + switch (object) { + case 0: + case "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED": + return LanguageRestrictionOptions_LanguageRestrictionType.LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED; + case 1: + case "WHITELIST": + return LanguageRestrictionOptions_LanguageRestrictionType.WHITELIST; + case 2: + case "BLACKLIST": + return 
LanguageRestrictionOptions_LanguageRestrictionType.BLACKLIST; + case -1: + case "UNRECOGNIZED": + default: + return LanguageRestrictionOptions_LanguageRestrictionType.UNRECOGNIZED; + } +} + +export function languageRestrictionOptions_LanguageRestrictionTypeToJSON( + object: LanguageRestrictionOptions_LanguageRestrictionType +): string { + switch (object) { + case LanguageRestrictionOptions_LanguageRestrictionType.LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED: + return "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED"; + case LanguageRestrictionOptions_LanguageRestrictionType.WHITELIST: + return "WHITELIST"; + case LanguageRestrictionOptions_LanguageRestrictionType.BLACKLIST: + return "BLACKLIST"; + default: + return "UNKNOWN"; + } +} + +export interface RecognitionModelOptions { + $type: "speechkit.stt.v3.RecognitionModelOptions"; + /** reserved for future, do not use */ + model: string; + /** config for input audio */ + audioFormat?: AudioFormatOptions; + /** text normalization options */ + textNormalization?: TextNormalizationOptions; + /** possible languages in audio */ + languageRestriction?: LanguageRestrictionOptions; + /** how to deal with audio data (in real time, after all data is received, etc). Default is REAL_TIME */ + audioProcessingType: RecognitionModelOptions_AudioProcessingType; +} + +export enum RecognitionModelOptions_AudioProcessingType { + AUDIO_PROCESSING_TYPE_UNSPECIFIED = 0, + REAL_TIME = 1, + FULL_DATA = 2, + UNRECOGNIZED = -1, +} + +export function recognitionModelOptions_AudioProcessingTypeFromJSON( + object: any +): RecognitionModelOptions_AudioProcessingType { + switch (object) { + case 0: + case "AUDIO_PROCESSING_TYPE_UNSPECIFIED": + return RecognitionModelOptions_AudioProcessingType.AUDIO_PROCESSING_TYPE_UNSPECIFIED; + case 1: + case "REAL_TIME": + return RecognitionModelOptions_AudioProcessingType.REAL_TIME; + case 2: + case "FULL_DATA": + return RecognitionModelOptions_AudioProcessingType.FULL_DATA; + case -1: + case "UNRECOGNIZED": + default: + return RecognitionModelOptions_AudioProcessingType.UNRECOGNIZED; + } +} + +export function recognitionModelOptions_AudioProcessingTypeToJSON( + object: RecognitionModelOptions_AudioProcessingType +): string { + switch (object) { + case RecognitionModelOptions_AudioProcessingType.AUDIO_PROCESSING_TYPE_UNSPECIFIED: + return "AUDIO_PROCESSING_TYPE_UNSPECIFIED"; + case RecognitionModelOptions_AudioProcessingType.REAL_TIME: + return "REAL_TIME"; + case RecognitionModelOptions_AudioProcessingType.FULL_DATA: + return "FULL_DATA"; + default: + return "UNKNOWN"; + } +} + +export interface StreamingOptions { + $type: "speechkit.stt.v3.StreamingOptions"; + /** configuration for speech recognition model */ + recognitionModel?: RecognitionModelOptions; + /** configuration for end of utterance detection model */ + eouClassifier?: EouClassifierOptions; +} + +/** data chunk with audio */ +export interface AudioChunk { + $type: "speechkit.stt.v3.AudioChunk"; + /** bytes with audio data */ + data: Buffer; +} + +export interface SilenceChunk { + $type: "speechkit.stt.v3.SilenceChunk"; + /** duration of silence chunk in ms */ + durationMs: number; +} + +/** force EOU */ +export interface Eou { + $type: "speechkit.stt.v3.Eou"; +} + +/** + * streaming audio request + * Events are control messages from user + * first message should be session options + * the next messages are audio data chunks or control messages + */ +export interface StreamingRequest { + $type: "speechkit.stt.v3.StreamingRequest"; + /** Session options. 
should be first message from user */ + sessionOptions?: StreamingOptions | undefined; + /** chunk with audio data */ + chunk?: AudioChunk | undefined; + /** chunk with silence */ + silenceChunk?: SilenceChunk | undefined; + /** request to end current utterance. Works only with external EoU detector */ + eou?: Eou | undefined; +} + +/** recognized word */ +export interface Word { + $type: "speechkit.stt.v3.Word"; + /** word text */ + text: string; + /** estimation of word start time in ms */ + startTimeMs: number; + /** estimation of word end time in ms */ + endTimeMs: number; +} + +/** recognition of specific time frame */ +export interface Alternative { + $type: "speechkit.stt.v3.Alternative"; + /** words in time frame */ + words: Word[]; + /** text in time frame */ + text: string; + /** start of time frame */ + startTimeMs: number; + /** end of time frame */ + endTimeMs: number; + /** hypothesis confidence. Currently is not used */ + confidence: number; +} + +/** Update information from */ +export interface EouUpdate { + $type: "speechkit.stt.v3.EouUpdate"; + /** end of utterance estimated time */ + timeMs: number; +} + +/** update of hypothesis */ +export interface AlternativeUpdate { + $type: "speechkit.stt.v3.AlternativeUpdate"; + /** list of hypothesis for timeframes */ + alternatives: Alternative[]; + /** tag for distinguish audio channels. */ + channelTag: string; +} + +/** AudioCursors are state of ASR recognition stream */ +export interface AudioCursors { + $type: "speechkit.stt.v3.AudioCursors"; + /** amount of audio chunks server received. This cursor is moved after each audio chunk was received by server. */ + receivedDataMs: number; + /** input stream reset data */ + resetTimeMs: number; + /** + * how much audio was processed. This time includes trimming silences as well. This cursor is moved after server received enough data + * to update recognition results (includes silence as well) + */ + partialTimeMs: number; + /** + * Time of last final. This cursor is moved when server decides that recognition from start of audio until final_time_ms will not change anymore + * usually this even is followed by EOU detection (but this could change in future) + */ + finalTimeMs: number; + /** This is index of last final server send. Incremented after each new final. */ + finalIndex: number; + /** + * Estimated time of EOU. Cursor is updated after each new EOU is sent + * For external classifier this equals to received_data_ms at the moment EOU event arrives + * For internal classifier this is estimation of time. The time is not exact and has the same guarantees as word timings + */ + eouTimeMs: number; +} + +/** refinement for final hypo. For example, text normalization is refinement. 
*/ +export interface FinalRefinement { + $type: "speechkit.stt.v3.FinalRefinement"; + /** index of final for which server sends additional information */ + finalIndex: number; + /** normalized text instead of raw one */ + normalizedText?: AlternativeUpdate | undefined; +} + +/** status message */ +export interface StatusCode { + $type: "speechkit.stt.v3.StatusCode"; + /** code type */ + codeType: CodeType; + /** human readable message */ + message: string; +} + +/** session identifier */ +export interface SessionUuid { + $type: "speechkit.stt.v3.SessionUuid"; + /** internal session identifier */ + uuid: string; + /** user session identifier */ + userRequestId: string; +} + +/** + * responses from server + * each response contains session uuid + * AudioCursors + * plus specific even + */ +export interface StreamingResponse { + $type: "speechkit.stt.v3.StreamingResponse"; + /** session identifier */ + sessionUuid?: SessionUuid; + /** progress bar for stream session recognition: how many data we obtained; final and partial times; etc */ + audioCursors?: AudioCursors; + /** wall clock on server side. This is time when server wrote results to stream */ + responseWallTimeMs: number; + /** + * partial results, server will send them regularly after enough audio data was received from user. This are current text estimation + * from final_time_ms to partial_time_ms. Could change after new data will arrive + */ + partial?: AlternativeUpdate | undefined; + /** final results, the recognition is now fixed until final_time_ms. For now, final is sent only if the EOU event was triggered. This could be change in future releases */ + final?: AlternativeUpdate | undefined; + /** + * After EOU classifier, send the message with final, send the EouUpdate with time of EOU + * before eou_update we send final with the same time. there could be several finals before eou update + */ + eouUpdate?: EouUpdate | undefined; + /** + * For each final, if normalization is enabled, sent the normalized text (or some other advanced post-processing). + * Final normalization will introduce additional latency + */ + finalRefinement?: FinalRefinement | undefined; + /** Status messages, send by server with fixed interval (keep-alive) */ + statusCode?: StatusCode | undefined; +} + +const baseTextNormalizationOptions: object = { + $type: "speechkit.stt.v3.TextNormalizationOptions", + textNormalization: 0, + profanityFilter: false, + literatureText: false, +}; + +export const TextNormalizationOptions = { + $type: "speechkit.stt.v3.TextNormalizationOptions" as const, + + encode( + message: TextNormalizationOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.textNormalization !== 0) { + writer.uint32(8).int32(message.textNormalization); + } + if (message.profanityFilter === true) { + writer.uint32(16).bool(message.profanityFilter); + } + if (message.literatureText === true) { + writer.uint32(24).bool(message.literatureText); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): TextNormalizationOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseTextNormalizationOptions, + } as TextNormalizationOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.textNormalization = reader.int32() as any; + break; + case 2: + message.profanityFilter = reader.bool(); + break; + case 3: + message.literatureText = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TextNormalizationOptions { + const message = { + ...baseTextNormalizationOptions, + } as TextNormalizationOptions; + message.textNormalization = + object.textNormalization !== undefined && + object.textNormalization !== null + ? textNormalizationOptions_TextNormalizationFromJSON( + object.textNormalization + ) + : 0; + message.profanityFilter = + object.profanityFilter !== undefined && object.profanityFilter !== null + ? Boolean(object.profanityFilter) + : false; + message.literatureText = + object.literatureText !== undefined && object.literatureText !== null + ? Boolean(object.literatureText) + : false; + return message; + }, + + toJSON(message: TextNormalizationOptions): unknown { + const obj: any = {}; + message.textNormalization !== undefined && + (obj.textNormalization = textNormalizationOptions_TextNormalizationToJSON( + message.textNormalization + )); + message.profanityFilter !== undefined && + (obj.profanityFilter = message.profanityFilter); + message.literatureText !== undefined && + (obj.literatureText = message.literatureText); + return obj; + }, + + fromPartial, I>>( + object: I + ): TextNormalizationOptions { + const message = { + ...baseTextNormalizationOptions, + } as TextNormalizationOptions; + message.textNormalization = object.textNormalization ?? 0; + message.profanityFilter = object.profanityFilter ?? false; + message.literatureText = object.literatureText ?? false; + return message; + }, +}; + +messageTypeRegistry.set( + TextNormalizationOptions.$type, + TextNormalizationOptions +); + +const baseDefaultEouClassifier: object = { + $type: "speechkit.stt.v3.DefaultEouClassifier", + type: 0, + maxPauseBetweenWordsHintMs: 0, +}; + +export const DefaultEouClassifier = { + $type: "speechkit.stt.v3.DefaultEouClassifier" as const, + + encode( + message: DefaultEouClassifier, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.type !== 0) { + writer.uint32(8).int32(message.type); + } + if (message.maxPauseBetweenWordsHintMs !== 0) { + writer.uint32(16).int64(message.maxPauseBetweenWordsHintMs); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DefaultEouClassifier { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDefaultEouClassifier } as DefaultEouClassifier; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32() as any; + break; + case 2: + message.maxPauseBetweenWordsHintMs = longToNumber( + reader.int64() as Long + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DefaultEouClassifier { + const message = { ...baseDefaultEouClassifier } as DefaultEouClassifier; + message.type = + object.type !== undefined && object.type !== null + ? 
defaultEouClassifier_EouSensitivityFromJSON(object.type) + : 0; + message.maxPauseBetweenWordsHintMs = + object.maxPauseBetweenWordsHintMs !== undefined && + object.maxPauseBetweenWordsHintMs !== null + ? Number(object.maxPauseBetweenWordsHintMs) + : 0; + return message; + }, + + toJSON(message: DefaultEouClassifier): unknown { + const obj: any = {}; + message.type !== undefined && + (obj.type = defaultEouClassifier_EouSensitivityToJSON(message.type)); + message.maxPauseBetweenWordsHintMs !== undefined && + (obj.maxPauseBetweenWordsHintMs = Math.round( + message.maxPauseBetweenWordsHintMs + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): DefaultEouClassifier { + const message = { ...baseDefaultEouClassifier } as DefaultEouClassifier; + message.type = object.type ?? 0; + message.maxPauseBetweenWordsHintMs = object.maxPauseBetweenWordsHintMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(DefaultEouClassifier.$type, DefaultEouClassifier); + +const baseExternalEouClassifier: object = { + $type: "speechkit.stt.v3.ExternalEouClassifier", +}; + +export const ExternalEouClassifier = { + $type: "speechkit.stt.v3.ExternalEouClassifier" as const, + + encode( + _: ExternalEouClassifier, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExternalEouClassifier { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExternalEouClassifier } as ExternalEouClassifier; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): ExternalEouClassifier { + const message = { ...baseExternalEouClassifier } as ExternalEouClassifier; + return message; + }, + + toJSON(_: ExternalEouClassifier): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): ExternalEouClassifier { + const message = { ...baseExternalEouClassifier } as ExternalEouClassifier; + return message; + }, +}; + +messageTypeRegistry.set(ExternalEouClassifier.$type, ExternalEouClassifier); + +const baseEouClassifierOptions: object = { + $type: "speechkit.stt.v3.EouClassifierOptions", +}; + +export const EouClassifierOptions = { + $type: "speechkit.stt.v3.EouClassifierOptions" as const, + + encode( + message: EouClassifierOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.defaultClassifier !== undefined) { + DefaultEouClassifier.encode( + message.defaultClassifier, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.externalClassifier !== undefined) { + ExternalEouClassifier.encode( + message.externalClassifier, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): EouClassifierOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEouClassifierOptions } as EouClassifierOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultClassifier = DefaultEouClassifier.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.externalClassifier = ExternalEouClassifier.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EouClassifierOptions { + const message = { ...baseEouClassifierOptions } as EouClassifierOptions; + message.defaultClassifier = + object.defaultClassifier !== undefined && + object.defaultClassifier !== null + ? DefaultEouClassifier.fromJSON(object.defaultClassifier) + : undefined; + message.externalClassifier = + object.externalClassifier !== undefined && + object.externalClassifier !== null + ? ExternalEouClassifier.fromJSON(object.externalClassifier) + : undefined; + return message; + }, + + toJSON(message: EouClassifierOptions): unknown { + const obj: any = {}; + message.defaultClassifier !== undefined && + (obj.defaultClassifier = message.defaultClassifier + ? DefaultEouClassifier.toJSON(message.defaultClassifier) + : undefined); + message.externalClassifier !== undefined && + (obj.externalClassifier = message.externalClassifier + ? ExternalEouClassifier.toJSON(message.externalClassifier) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): EouClassifierOptions { + const message = { ...baseEouClassifierOptions } as EouClassifierOptions; + message.defaultClassifier = + object.defaultClassifier !== undefined && + object.defaultClassifier !== null + ? DefaultEouClassifier.fromPartial(object.defaultClassifier) + : undefined; + message.externalClassifier = + object.externalClassifier !== undefined && + object.externalClassifier !== null + ? ExternalEouClassifier.fromPartial(object.externalClassifier) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EouClassifierOptions.$type, EouClassifierOptions); + +const baseRawAudio: object = { + $type: "speechkit.stt.v3.RawAudio", + audioEncoding: 0, + sampleRateHertz: 0, + audioChannelCount: 0, +}; + +export const RawAudio = { + $type: "speechkit.stt.v3.RawAudio" as const, + + encode( + message: RawAudio, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.audioEncoding !== 0) { + writer.uint32(8).int32(message.audioEncoding); + } + if (message.sampleRateHertz !== 0) { + writer.uint32(16).int64(message.sampleRateHertz); + } + if (message.audioChannelCount !== 0) { + writer.uint32(24).int64(message.audioChannelCount); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RawAudio { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRawAudio } as RawAudio; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.audioEncoding = reader.int32() as any; + break; + case 2: + message.sampleRateHertz = longToNumber(reader.int64() as Long); + break; + case 3: + message.audioChannelCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RawAudio { + const message = { ...baseRawAudio } as RawAudio; + message.audioEncoding = + object.audioEncoding !== undefined && object.audioEncoding !== null + ? rawAudio_AudioEncodingFromJSON(object.audioEncoding) + : 0; + message.sampleRateHertz = + object.sampleRateHertz !== undefined && object.sampleRateHertz !== null + ? Number(object.sampleRateHertz) + : 0; + message.audioChannelCount = + object.audioChannelCount !== undefined && + object.audioChannelCount !== null + ? Number(object.audioChannelCount) + : 0; + return message; + }, + + toJSON(message: RawAudio): unknown { + const obj: any = {}; + message.audioEncoding !== undefined && + (obj.audioEncoding = rawAudio_AudioEncodingToJSON(message.audioEncoding)); + message.sampleRateHertz !== undefined && + (obj.sampleRateHertz = Math.round(message.sampleRateHertz)); + message.audioChannelCount !== undefined && + (obj.audioChannelCount = Math.round(message.audioChannelCount)); + return obj; + }, + + fromPartial, I>>(object: I): RawAudio { + const message = { ...baseRawAudio } as RawAudio; + message.audioEncoding = object.audioEncoding ?? 0; + message.sampleRateHertz = object.sampleRateHertz ?? 0; + message.audioChannelCount = object.audioChannelCount ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(RawAudio.$type, RawAudio); + +const baseContainerAudio: object = { + $type: "speechkit.stt.v3.ContainerAudio", + containerAudioType: 0, +}; + +export const ContainerAudio = { + $type: "speechkit.stt.v3.ContainerAudio" as const, + + encode( + message: ContainerAudio, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.containerAudioType !== 0) { + writer.uint32(8).int32(message.containerAudioType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ContainerAudio { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseContainerAudio } as ContainerAudio; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.containerAudioType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ContainerAudio { + const message = { ...baseContainerAudio } as ContainerAudio; + message.containerAudioType = + object.containerAudioType !== undefined && + object.containerAudioType !== null + ? containerAudio_ContainerAudioTypeFromJSON(object.containerAudioType) + : 0; + return message; + }, + + toJSON(message: ContainerAudio): unknown { + const obj: any = {}; + message.containerAudioType !== undefined && + (obj.containerAudioType = containerAudio_ContainerAudioTypeToJSON( + message.containerAudioType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ContainerAudio { + const message = { ...baseContainerAudio } as ContainerAudio; + message.containerAudioType = object.containerAudioType ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ContainerAudio.$type, ContainerAudio); + +const baseAudioFormatOptions: object = { + $type: "speechkit.stt.v3.AudioFormatOptions", +}; + +export const AudioFormatOptions = { + $type: "speechkit.stt.v3.AudioFormatOptions" as const, + + encode( + message: AudioFormatOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.rawAudio !== undefined) { + RawAudio.encode(message.rawAudio, writer.uint32(10).fork()).ldelim(); + } + if (message.containerAudio !== undefined) { + ContainerAudio.encode( + message.containerAudio, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioFormatOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rawAudio = RawAudio.decode(reader, reader.uint32()); + break; + case 2: + message.containerAudio = ContainerAudio.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioFormatOptions { + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + message.rawAudio = + object.rawAudio !== undefined && object.rawAudio !== null + ? RawAudio.fromJSON(object.rawAudio) + : undefined; + message.containerAudio = + object.containerAudio !== undefined && object.containerAudio !== null + ? ContainerAudio.fromJSON(object.containerAudio) + : undefined; + return message; + }, + + toJSON(message: AudioFormatOptions): unknown { + const obj: any = {}; + message.rawAudio !== undefined && + (obj.rawAudio = message.rawAudio + ? RawAudio.toJSON(message.rawAudio) + : undefined); + message.containerAudio !== undefined && + (obj.containerAudio = message.containerAudio + ? ContainerAudio.toJSON(message.containerAudio) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioFormatOptions { + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + message.rawAudio = + object.rawAudio !== undefined && object.rawAudio !== null + ? RawAudio.fromPartial(object.rawAudio) + : undefined; + message.containerAudio = + object.containerAudio !== undefined && object.containerAudio !== null + ? ContainerAudio.fromPartial(object.containerAudio) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(AudioFormatOptions.$type, AudioFormatOptions); + +const baseLanguageRestrictionOptions: object = { + $type: "speechkit.stt.v3.LanguageRestrictionOptions", + restrictionType: 0, + languageCode: "", +}; + +export const LanguageRestrictionOptions = { + $type: "speechkit.stt.v3.LanguageRestrictionOptions" as const, + + encode( + message: LanguageRestrictionOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.restrictionType !== 0) { + writer.uint32(8).int32(message.restrictionType); + } + for (const v of message.languageCode) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LanguageRestrictionOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseLanguageRestrictionOptions, + } as LanguageRestrictionOptions; + message.languageCode = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.restrictionType = reader.int32() as any; + break; + case 2: + message.languageCode.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LanguageRestrictionOptions { + const message = { + ...baseLanguageRestrictionOptions, + } as LanguageRestrictionOptions; + message.restrictionType = + object.restrictionType !== undefined && object.restrictionType !== null + ? languageRestrictionOptions_LanguageRestrictionTypeFromJSON( + object.restrictionType + ) + : 0; + message.languageCode = (object.languageCode ?? []).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: LanguageRestrictionOptions): unknown { + const obj: any = {}; + message.restrictionType !== undefined && + (obj.restrictionType = + languageRestrictionOptions_LanguageRestrictionTypeToJSON( + message.restrictionType + )); + if (message.languageCode) { + obj.languageCode = message.languageCode.map((e) => e); + } else { + obj.languageCode = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LanguageRestrictionOptions { + const message = { + ...baseLanguageRestrictionOptions, + } as LanguageRestrictionOptions; + message.restrictionType = object.restrictionType ?? 0; + message.languageCode = object.languageCode?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + LanguageRestrictionOptions.$type, + LanguageRestrictionOptions +); + +const baseRecognitionModelOptions: object = { + $type: "speechkit.stt.v3.RecognitionModelOptions", + model: "", + audioProcessingType: 0, +}; + +export const RecognitionModelOptions = { + $type: "speechkit.stt.v3.RecognitionModelOptions" as const, + + encode( + message: RecognitionModelOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.model !== "") { + writer.uint32(10).string(message.model); + } + if (message.audioFormat !== undefined) { + AudioFormatOptions.encode( + message.audioFormat, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.textNormalization !== undefined) { + TextNormalizationOptions.encode( + message.textNormalization, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.languageRestriction !== undefined) { + LanguageRestrictionOptions.encode( + message.languageRestriction, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.audioProcessingType !== 0) { + writer.uint32(40).int32(message.audioProcessingType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionModelOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRecognitionModelOptions, + } as RecognitionModelOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.model = reader.string(); + break; + case 2: + message.audioFormat = AudioFormatOptions.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.textNormalization = TextNormalizationOptions.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.languageRestriction = LanguageRestrictionOptions.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.audioProcessingType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognitionModelOptions { + const message = { + ...baseRecognitionModelOptions, + } as RecognitionModelOptions; + message.model = + object.model !== undefined && object.model !== null + ? String(object.model) + : ""; + message.audioFormat = + object.audioFormat !== undefined && object.audioFormat !== null + ? AudioFormatOptions.fromJSON(object.audioFormat) + : undefined; + message.textNormalization = + object.textNormalization !== undefined && + object.textNormalization !== null + ? TextNormalizationOptions.fromJSON(object.textNormalization) + : undefined; + message.languageRestriction = + object.languageRestriction !== undefined && + object.languageRestriction !== null + ? LanguageRestrictionOptions.fromJSON(object.languageRestriction) + : undefined; + message.audioProcessingType = + object.audioProcessingType !== undefined && + object.audioProcessingType !== null + ? recognitionModelOptions_AudioProcessingTypeFromJSON( + object.audioProcessingType + ) + : 0; + return message; + }, + + toJSON(message: RecognitionModelOptions): unknown { + const obj: any = {}; + message.model !== undefined && (obj.model = message.model); + message.audioFormat !== undefined && + (obj.audioFormat = message.audioFormat + ? AudioFormatOptions.toJSON(message.audioFormat) + : undefined); + message.textNormalization !== undefined && + (obj.textNormalization = message.textNormalization + ? TextNormalizationOptions.toJSON(message.textNormalization) + : undefined); + message.languageRestriction !== undefined && + (obj.languageRestriction = message.languageRestriction + ? LanguageRestrictionOptions.toJSON(message.languageRestriction) + : undefined); + message.audioProcessingType !== undefined && + (obj.audioProcessingType = + recognitionModelOptions_AudioProcessingTypeToJSON( + message.audioProcessingType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognitionModelOptions { + const message = { + ...baseRecognitionModelOptions, + } as RecognitionModelOptions; + message.model = object.model ?? ""; + message.audioFormat = + object.audioFormat !== undefined && object.audioFormat !== null + ? AudioFormatOptions.fromPartial(object.audioFormat) + : undefined; + message.textNormalization = + object.textNormalization !== undefined && + object.textNormalization !== null + ? TextNormalizationOptions.fromPartial(object.textNormalization) + : undefined; + message.languageRestriction = + object.languageRestriction !== undefined && + object.languageRestriction !== null + ? LanguageRestrictionOptions.fromPartial(object.languageRestriction) + : undefined; + message.audioProcessingType = object.audioProcessingType ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(RecognitionModelOptions.$type, RecognitionModelOptions); + +const baseStreamingOptions: object = { + $type: "speechkit.stt.v3.StreamingOptions", +}; + +export const StreamingOptions = { + $type: "speechkit.stt.v3.StreamingOptions" as const, + + encode( + message: StreamingOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recognitionModel !== undefined) { + RecognitionModelOptions.encode( + message.recognitionModel, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.eouClassifier !== undefined) { + EouClassifierOptions.encode( + message.eouClassifier, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamingOptions } as StreamingOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recognitionModel = RecognitionModelOptions.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.eouClassifier = EouClassifierOptions.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingOptions { + const message = { ...baseStreamingOptions } as StreamingOptions; + message.recognitionModel = + object.recognitionModel !== undefined && object.recognitionModel !== null + ? RecognitionModelOptions.fromJSON(object.recognitionModel) + : undefined; + message.eouClassifier = + object.eouClassifier !== undefined && object.eouClassifier !== null + ? EouClassifierOptions.fromJSON(object.eouClassifier) + : undefined; + return message; + }, + + toJSON(message: StreamingOptions): unknown { + const obj: any = {}; + message.recognitionModel !== undefined && + (obj.recognitionModel = message.recognitionModel + ? RecognitionModelOptions.toJSON(message.recognitionModel) + : undefined); + message.eouClassifier !== undefined && + (obj.eouClassifier = message.eouClassifier + ? EouClassifierOptions.toJSON(message.eouClassifier) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamingOptions { + const message = { ...baseStreamingOptions } as StreamingOptions; + message.recognitionModel = + object.recognitionModel !== undefined && object.recognitionModel !== null + ? RecognitionModelOptions.fromPartial(object.recognitionModel) + : undefined; + message.eouClassifier = + object.eouClassifier !== undefined && object.eouClassifier !== null + ? EouClassifierOptions.fromPartial(object.eouClassifier) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(StreamingOptions.$type, StreamingOptions); + +const baseAudioChunk: object = { $type: "speechkit.stt.v3.AudioChunk" }; + +export const AudioChunk = { + $type: "speechkit.stt.v3.AudioChunk" as const, + + encode( + message: AudioChunk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.data.length !== 0) { + writer.uint32(10).bytes(message.data); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioChunk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAudioChunk } as AudioChunk; + message.data = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioChunk { + const message = { ...baseAudioChunk } as AudioChunk; + message.data = + object.data !== undefined && object.data !== null + ? Buffer.from(bytesFromBase64(object.data)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AudioChunk): unknown { + const obj: any = {}; + message.data !== undefined && + (obj.data = base64FromBytes( + message.data !== undefined ? message.data : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioChunk { + const message = { ...baseAudioChunk } as AudioChunk; + message.data = object.data ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set(AudioChunk.$type, AudioChunk); + +const baseSilenceChunk: object = { + $type: "speechkit.stt.v3.SilenceChunk", + durationMs: 0, +}; + +export const SilenceChunk = { + $type: "speechkit.stt.v3.SilenceChunk" as const, + + encode( + message: SilenceChunk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.durationMs !== 0) { + writer.uint32(8).int64(message.durationMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SilenceChunk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSilenceChunk } as SilenceChunk; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.durationMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SilenceChunk { + const message = { ...baseSilenceChunk } as SilenceChunk; + message.durationMs = + object.durationMs !== undefined && object.durationMs !== null + ? Number(object.durationMs) + : 0; + return message; + }, + + toJSON(message: SilenceChunk): unknown { + const obj: any = {}; + message.durationMs !== undefined && + (obj.durationMs = Math.round(message.durationMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): SilenceChunk { + const message = { ...baseSilenceChunk } as SilenceChunk; + message.durationMs = object.durationMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(SilenceChunk.$type, SilenceChunk); + +const baseEou: object = { $type: "speechkit.stt.v3.Eou" }; + +export const Eou = { + $type: "speechkit.stt.v3.Eou" as const, + + encode(_: Eou, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Eou { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEou } as Eou; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): Eou { + const message = { ...baseEou } as Eou; + return message; + }, + + toJSON(_: Eou): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>(_: I): Eou { + const message = { ...baseEou } as Eou; + return message; + }, +}; + +messageTypeRegistry.set(Eou.$type, Eou); + +const baseStreamingRequest: object = { + $type: "speechkit.stt.v3.StreamingRequest", +}; + +export const StreamingRequest = { + $type: "speechkit.stt.v3.StreamingRequest" as const, + + encode( + message: StreamingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sessionOptions !== undefined) { + StreamingOptions.encode( + message.sessionOptions, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.chunk !== undefined) { + AudioChunk.encode(message.chunk, writer.uint32(18).fork()).ldelim(); + } + if (message.silenceChunk !== undefined) { + SilenceChunk.encode( + message.silenceChunk, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.eou !== undefined) { + Eou.encode(message.eou, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamingRequest } as StreamingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sessionOptions = StreamingOptions.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.chunk = AudioChunk.decode(reader, reader.uint32()); + break; + case 3: + message.silenceChunk = SilenceChunk.decode(reader, reader.uint32()); + break; + case 4: + message.eou = Eou.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingRequest { + const message = { ...baseStreamingRequest } as StreamingRequest; + message.sessionOptions = + object.sessionOptions !== undefined && object.sessionOptions !== null + ? StreamingOptions.fromJSON(object.sessionOptions) + : undefined; + message.chunk = + object.chunk !== undefined && object.chunk !== null + ? AudioChunk.fromJSON(object.chunk) + : undefined; + message.silenceChunk = + object.silenceChunk !== undefined && object.silenceChunk !== null + ? SilenceChunk.fromJSON(object.silenceChunk) + : undefined; + message.eou = + object.eou !== undefined && object.eou !== null + ? Eou.fromJSON(object.eou) + : undefined; + return message; + }, + + toJSON(message: StreamingRequest): unknown { + const obj: any = {}; + message.sessionOptions !== undefined && + (obj.sessionOptions = message.sessionOptions + ? StreamingOptions.toJSON(message.sessionOptions) + : undefined); + message.chunk !== undefined && + (obj.chunk = message.chunk + ? AudioChunk.toJSON(message.chunk) + : undefined); + message.silenceChunk !== undefined && + (obj.silenceChunk = message.silenceChunk + ? SilenceChunk.toJSON(message.silenceChunk) + : undefined); + message.eou !== undefined && + (obj.eou = message.eou ? 
Eou.toJSON(message.eou) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamingRequest { + const message = { ...baseStreamingRequest } as StreamingRequest; + message.sessionOptions = + object.sessionOptions !== undefined && object.sessionOptions !== null + ? StreamingOptions.fromPartial(object.sessionOptions) + : undefined; + message.chunk = + object.chunk !== undefined && object.chunk !== null + ? AudioChunk.fromPartial(object.chunk) + : undefined; + message.silenceChunk = + object.silenceChunk !== undefined && object.silenceChunk !== null + ? SilenceChunk.fromPartial(object.silenceChunk) + : undefined; + message.eou = + object.eou !== undefined && object.eou !== null + ? Eou.fromPartial(object.eou) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(StreamingRequest.$type, StreamingRequest); + +const baseWord: object = { + $type: "speechkit.stt.v3.Word", + text: "", + startTimeMs: 0, + endTimeMs: 0, +}; + +export const Word = { + $type: "speechkit.stt.v3.Word" as const, + + encode(message: Word, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.text !== "") { + writer.uint32(10).string(message.text); + } + if (message.startTimeMs !== 0) { + writer.uint32(16).int64(message.startTimeMs); + } + if (message.endTimeMs !== 0) { + writer.uint32(24).int64(message.endTimeMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Word { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseWord } as Word; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.text = reader.string(); + break; + case 2: + message.startTimeMs = longToNumber(reader.int64() as Long); + break; + case 3: + message.endTimeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Word { + const message = { ...baseWord } as Word; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) + : 0; + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + return message; + }, + + toJSON(message: Word): unknown { + const obj: any = {}; + message.text !== undefined && (obj.text = message.text); + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + return obj; + }, + + fromPartial, I>>(object: I): Word { + const message = { ...baseWord } as Word; + message.text = object.text ?? ""; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 
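Taken together, the messages above describe one streaming recognition session: the first StreamingRequest carries sessionOptions, and every later request carries an audio chunk, a silenceChunk, or an explicit eou marker. The sketch below is illustrative only; the model name "general", the 8000 Hz mono PCM format, the LINEAR16_PCM and WHITELIST enum members, and the language codes are assumptions, not values taken from this patch.

const sessionOptions = StreamingOptions.fromPartial({
  recognitionModel: {
    model: "general", // assumed model name
    audioFormat: {
      rawAudio: {
        audioEncoding: RawAudio_AudioEncoding.LINEAR16_PCM, // member name assumed
        sampleRateHertz: 8000,
        audioChannelCount: 1,
      },
    },
    languageRestriction: {
      restrictionType: LanguageRestrictionOptions_LanguageRestrictionType.WHITELIST, // member name assumed
      languageCode: ["ru-RU", "en-US"],
    },
  },
  eouClassifier: { defaultClassifier: {} },
});

// First message of the duplex stream: session options only.
const openRequest = StreamingRequest.fromPartial({ sessionOptions });

// Follow-up messages: raw audio bytes, or a silence marker instead of zero-filled audio.
const audioRequest = (data: Buffer) =>
  StreamingRequest.fromPartial({ chunk: { data } });
const silenceRequest = StreamingRequest.fromPartial({
  silenceChunk: { durationMs: 300 },
});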
0; + return message; + }, +}; + +messageTypeRegistry.set(Word.$type, Word); + +const baseAlternative: object = { + $type: "speechkit.stt.v3.Alternative", + text: "", + startTimeMs: 0, + endTimeMs: 0, + confidence: 0, +}; + +export const Alternative = { + $type: "speechkit.stt.v3.Alternative" as const, + + encode( + message: Alternative, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.words) { + Word.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + if (message.startTimeMs !== 0) { + writer.uint32(24).int64(message.startTimeMs); + } + if (message.endTimeMs !== 0) { + writer.uint32(32).int64(message.endTimeMs); + } + if (message.confidence !== 0) { + writer.uint32(41).double(message.confidence); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Alternative { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAlternative } as Alternative; + message.words = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.words.push(Word.decode(reader, reader.uint32())); + break; + case 2: + message.text = reader.string(); + break; + case 3: + message.startTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.endTimeMs = longToNumber(reader.int64() as Long); + break; + case 5: + message.confidence = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Alternative { + const message = { ...baseAlternative } as Alternative; + message.words = (object.words ?? []).map((e: any) => Word.fromJSON(e)); + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) + : 0; + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + message.confidence = + object.confidence !== undefined && object.confidence !== null + ? Number(object.confidence) + : 0; + return message; + }, + + toJSON(message: Alternative): unknown { + const obj: any = {}; + if (message.words) { + obj.words = message.words.map((e) => (e ? Word.toJSON(e) : undefined)); + } else { + obj.words = []; + } + message.text !== undefined && (obj.text = message.text); + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + message.confidence !== undefined && (obj.confidence = message.confidence); + return obj; + }, + + fromPartial, I>>( + object: I + ): Alternative { + const message = { ...baseAlternative } as Alternative; + message.words = object.words?.map((e) => Word.fromPartial(e)) || []; + message.text = object.text ?? ""; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 0; + message.confidence = object.confidence ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Alternative.$type, Alternative); + +const baseEouUpdate: object = { + $type: "speechkit.stt.v3.EouUpdate", + timeMs: 0, +}; + +export const EouUpdate = { + $type: "speechkit.stt.v3.EouUpdate" as const, + + encode( + message: EouUpdate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timeMs !== 0) { + writer.uint32(16).int64(message.timeMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EouUpdate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseEouUpdate } as EouUpdate; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.timeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EouUpdate { + const message = { ...baseEouUpdate } as EouUpdate; + message.timeMs = + object.timeMs !== undefined && object.timeMs !== null + ? Number(object.timeMs) + : 0; + return message; + }, + + toJSON(message: EouUpdate): unknown { + const obj: any = {}; + message.timeMs !== undefined && (obj.timeMs = Math.round(message.timeMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): EouUpdate { + const message = { ...baseEouUpdate } as EouUpdate; + message.timeMs = object.timeMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(EouUpdate.$type, EouUpdate); + +const baseAlternativeUpdate: object = { + $type: "speechkit.stt.v3.AlternativeUpdate", + channelTag: "", +}; + +export const AlternativeUpdate = { + $type: "speechkit.stt.v3.AlternativeUpdate" as const, + + encode( + message: AlternativeUpdate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.alternatives) { + Alternative.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.channelTag !== "") { + writer.uint32(18).string(message.channelTag); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AlternativeUpdate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAlternativeUpdate } as AlternativeUpdate; + message.alternatives = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alternatives.push( + Alternative.decode(reader, reader.uint32()) + ); + break; + case 2: + message.channelTag = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AlternativeUpdate { + const message = { ...baseAlternativeUpdate } as AlternativeUpdate; + message.alternatives = (object.alternatives ?? []).map((e: any) => + Alternative.fromJSON(e) + ); + message.channelTag = + object.channelTag !== undefined && object.channelTag !== null + ? String(object.channelTag) + : ""; + return message; + }, + + toJSON(message: AlternativeUpdate): unknown { + const obj: any = {}; + if (message.alternatives) { + obj.alternatives = message.alternatives.map((e) => + e ? 
Alternative.toJSON(e) : undefined + ); + } else { + obj.alternatives = []; + } + message.channelTag !== undefined && (obj.channelTag = message.channelTag); + return obj; + }, + + fromPartial, I>>( + object: I + ): AlternativeUpdate { + const message = { ...baseAlternativeUpdate } as AlternativeUpdate; + message.alternatives = + object.alternatives?.map((e) => Alternative.fromPartial(e)) || []; + message.channelTag = object.channelTag ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AlternativeUpdate.$type, AlternativeUpdate); + +const baseAudioCursors: object = { + $type: "speechkit.stt.v3.AudioCursors", + receivedDataMs: 0, + resetTimeMs: 0, + partialTimeMs: 0, + finalTimeMs: 0, + finalIndex: 0, + eouTimeMs: 0, +}; + +export const AudioCursors = { + $type: "speechkit.stt.v3.AudioCursors" as const, + + encode( + message: AudioCursors, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.receivedDataMs !== 0) { + writer.uint32(8).int64(message.receivedDataMs); + } + if (message.resetTimeMs !== 0) { + writer.uint32(16).int64(message.resetTimeMs); + } + if (message.partialTimeMs !== 0) { + writer.uint32(24).int64(message.partialTimeMs); + } + if (message.finalTimeMs !== 0) { + writer.uint32(32).int64(message.finalTimeMs); + } + if (message.finalIndex !== 0) { + writer.uint32(40).int64(message.finalIndex); + } + if (message.eouTimeMs !== 0) { + writer.uint32(48).int64(message.eouTimeMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioCursors { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAudioCursors } as AudioCursors; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.receivedDataMs = longToNumber(reader.int64() as Long); + break; + case 2: + message.resetTimeMs = longToNumber(reader.int64() as Long); + break; + case 3: + message.partialTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.finalTimeMs = longToNumber(reader.int64() as Long); + break; + case 5: + message.finalIndex = longToNumber(reader.int64() as Long); + break; + case 6: + message.eouTimeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioCursors { + const message = { ...baseAudioCursors } as AudioCursors; + message.receivedDataMs = + object.receivedDataMs !== undefined && object.receivedDataMs !== null + ? Number(object.receivedDataMs) + : 0; + message.resetTimeMs = + object.resetTimeMs !== undefined && object.resetTimeMs !== null + ? Number(object.resetTimeMs) + : 0; + message.partialTimeMs = + object.partialTimeMs !== undefined && object.partialTimeMs !== null + ? Number(object.partialTimeMs) + : 0; + message.finalTimeMs = + object.finalTimeMs !== undefined && object.finalTimeMs !== null + ? Number(object.finalTimeMs) + : 0; + message.finalIndex = + object.finalIndex !== undefined && object.finalIndex !== null + ? Number(object.finalIndex) + : 0; + message.eouTimeMs = + object.eouTimeMs !== undefined && object.eouTimeMs !== null + ? 
Number(object.eouTimeMs) + : 0; + return message; + }, + + toJSON(message: AudioCursors): unknown { + const obj: any = {}; + message.receivedDataMs !== undefined && + (obj.receivedDataMs = Math.round(message.receivedDataMs)); + message.resetTimeMs !== undefined && + (obj.resetTimeMs = Math.round(message.resetTimeMs)); + message.partialTimeMs !== undefined && + (obj.partialTimeMs = Math.round(message.partialTimeMs)); + message.finalTimeMs !== undefined && + (obj.finalTimeMs = Math.round(message.finalTimeMs)); + message.finalIndex !== undefined && + (obj.finalIndex = Math.round(message.finalIndex)); + message.eouTimeMs !== undefined && + (obj.eouTimeMs = Math.round(message.eouTimeMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioCursors { + const message = { ...baseAudioCursors } as AudioCursors; + message.receivedDataMs = object.receivedDataMs ?? 0; + message.resetTimeMs = object.resetTimeMs ?? 0; + message.partialTimeMs = object.partialTimeMs ?? 0; + message.finalTimeMs = object.finalTimeMs ?? 0; + message.finalIndex = object.finalIndex ?? 0; + message.eouTimeMs = object.eouTimeMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(AudioCursors.$type, AudioCursors); + +const baseFinalRefinement: object = { + $type: "speechkit.stt.v3.FinalRefinement", + finalIndex: 0, +}; + +export const FinalRefinement = { + $type: "speechkit.stt.v3.FinalRefinement" as const, + + encode( + message: FinalRefinement, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.finalIndex !== 0) { + writer.uint32(8).int64(message.finalIndex); + } + if (message.normalizedText !== undefined) { + AlternativeUpdate.encode( + message.normalizedText, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FinalRefinement { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseFinalRefinement } as FinalRefinement; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.finalIndex = longToNumber(reader.int64() as Long); + break; + case 2: + message.normalizedText = AlternativeUpdate.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FinalRefinement { + const message = { ...baseFinalRefinement } as FinalRefinement; + message.finalIndex = + object.finalIndex !== undefined && object.finalIndex !== null + ? Number(object.finalIndex) + : 0; + message.normalizedText = + object.normalizedText !== undefined && object.normalizedText !== null + ? AlternativeUpdate.fromJSON(object.normalizedText) + : undefined; + return message; + }, + + toJSON(message: FinalRefinement): unknown { + const obj: any = {}; + message.finalIndex !== undefined && + (obj.finalIndex = Math.round(message.finalIndex)); + message.normalizedText !== undefined && + (obj.normalizedText = message.normalizedText + ? AlternativeUpdate.toJSON(message.normalizedText) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): FinalRefinement { + const message = { ...baseFinalRefinement } as FinalRefinement; + message.finalIndex = object.finalIndex ?? 0; + message.normalizedText = + object.normalizedText !== undefined && object.normalizedText !== null + ? 
AlternativeUpdate.fromPartial(object.normalizedText) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(FinalRefinement.$type, FinalRefinement); + +const baseStatusCode: object = { + $type: "speechkit.stt.v3.StatusCode", + codeType: 0, + message: "", +}; + +export const StatusCode = { + $type: "speechkit.stt.v3.StatusCode" as const, + + encode( + message: StatusCode, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.codeType !== 0) { + writer.uint32(8).int32(message.codeType); + } + if (message.message !== "") { + writer.uint32(18).string(message.message); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StatusCode { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStatusCode } as StatusCode; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.codeType = reader.int32() as any; + break; + case 2: + message.message = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StatusCode { + const message = { ...baseStatusCode } as StatusCode; + message.codeType = + object.codeType !== undefined && object.codeType !== null + ? codeTypeFromJSON(object.codeType) + : 0; + message.message = + object.message !== undefined && object.message !== null + ? String(object.message) + : ""; + return message; + }, + + toJSON(message: StatusCode): unknown { + const obj: any = {}; + message.codeType !== undefined && + (obj.codeType = codeTypeToJSON(message.codeType)); + message.message !== undefined && (obj.message = message.message); + return obj; + }, + + fromPartial, I>>( + object: I + ): StatusCode { + const message = { ...baseStatusCode } as StatusCode; + message.codeType = object.codeType ?? 0; + message.message = object.message ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(StatusCode.$type, StatusCode); + +const baseSessionUuid: object = { + $type: "speechkit.stt.v3.SessionUuid", + uuid: "", + userRequestId: "", +}; + +export const SessionUuid = { + $type: "speechkit.stt.v3.SessionUuid" as const, + + encode( + message: SessionUuid, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uuid !== "") { + writer.uint32(10).string(message.uuid); + } + if (message.userRequestId !== "") { + writer.uint32(18).string(message.userRequestId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SessionUuid { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSessionUuid } as SessionUuid; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uuid = reader.string(); + break; + case 2: + message.userRequestId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SessionUuid { + const message = { ...baseSessionUuid } as SessionUuid; + message.uuid = + object.uuid !== undefined && object.uuid !== null + ? String(object.uuid) + : ""; + message.userRequestId = + object.userRequestId !== undefined && object.userRequestId !== null + ? 
String(object.userRequestId) + : ""; + return message; + }, + + toJSON(message: SessionUuid): unknown { + const obj: any = {}; + message.uuid !== undefined && (obj.uuid = message.uuid); + message.userRequestId !== undefined && + (obj.userRequestId = message.userRequestId); + return obj; + }, + + fromPartial, I>>( + object: I + ): SessionUuid { + const message = { ...baseSessionUuid } as SessionUuid; + message.uuid = object.uuid ?? ""; + message.userRequestId = object.userRequestId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(SessionUuid.$type, SessionUuid); + +const baseStreamingResponse: object = { + $type: "speechkit.stt.v3.StreamingResponse", + responseWallTimeMs: 0, +}; + +export const StreamingResponse = { + $type: "speechkit.stt.v3.StreamingResponse" as const, + + encode( + message: StreamingResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sessionUuid !== undefined) { + SessionUuid.encode( + message.sessionUuid, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.audioCursors !== undefined) { + AudioCursors.encode( + message.audioCursors, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.responseWallTimeMs !== 0) { + writer.uint32(24).int64(message.responseWallTimeMs); + } + if (message.partial !== undefined) { + AlternativeUpdate.encode( + message.partial, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.final !== undefined) { + AlternativeUpdate.encode( + message.final, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.eouUpdate !== undefined) { + EouUpdate.encode(message.eouUpdate, writer.uint32(50).fork()).ldelim(); + } + if (message.finalRefinement !== undefined) { + FinalRefinement.encode( + message.finalRefinement, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.statusCode !== undefined) { + StatusCode.encode(message.statusCode, writer.uint32(66).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamingResponse } as StreamingResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sessionUuid = SessionUuid.decode(reader, reader.uint32()); + break; + case 2: + message.audioCursors = AudioCursors.decode(reader, reader.uint32()); + break; + case 3: + message.responseWallTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.partial = AlternativeUpdate.decode(reader, reader.uint32()); + break; + case 5: + message.final = AlternativeUpdate.decode(reader, reader.uint32()); + break; + case 6: + message.eouUpdate = EouUpdate.decode(reader, reader.uint32()); + break; + case 7: + message.finalRefinement = FinalRefinement.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.statusCode = StatusCode.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingResponse { + const message = { ...baseStreamingResponse } as StreamingResponse; + message.sessionUuid = + object.sessionUuid !== undefined && object.sessionUuid !== null + ? SessionUuid.fromJSON(object.sessionUuid) + : undefined; + message.audioCursors = + object.audioCursors !== undefined && object.audioCursors !== null + ? 
AudioCursors.fromJSON(object.audioCursors) + : undefined; + message.responseWallTimeMs = + object.responseWallTimeMs !== undefined && + object.responseWallTimeMs !== null + ? Number(object.responseWallTimeMs) + : 0; + message.partial = + object.partial !== undefined && object.partial !== null + ? AlternativeUpdate.fromJSON(object.partial) + : undefined; + message.final = + object.final !== undefined && object.final !== null + ? AlternativeUpdate.fromJSON(object.final) + : undefined; + message.eouUpdate = + object.eouUpdate !== undefined && object.eouUpdate !== null + ? EouUpdate.fromJSON(object.eouUpdate) + : undefined; + message.finalRefinement = + object.finalRefinement !== undefined && object.finalRefinement !== null + ? FinalRefinement.fromJSON(object.finalRefinement) + : undefined; + message.statusCode = + object.statusCode !== undefined && object.statusCode !== null + ? StatusCode.fromJSON(object.statusCode) + : undefined; + return message; + }, + + toJSON(message: StreamingResponse): unknown { + const obj: any = {}; + message.sessionUuid !== undefined && + (obj.sessionUuid = message.sessionUuid + ? SessionUuid.toJSON(message.sessionUuid) + : undefined); + message.audioCursors !== undefined && + (obj.audioCursors = message.audioCursors + ? AudioCursors.toJSON(message.audioCursors) + : undefined); + message.responseWallTimeMs !== undefined && + (obj.responseWallTimeMs = Math.round(message.responseWallTimeMs)); + message.partial !== undefined && + (obj.partial = message.partial + ? AlternativeUpdate.toJSON(message.partial) + : undefined); + message.final !== undefined && + (obj.final = message.final + ? AlternativeUpdate.toJSON(message.final) + : undefined); + message.eouUpdate !== undefined && + (obj.eouUpdate = message.eouUpdate + ? EouUpdate.toJSON(message.eouUpdate) + : undefined); + message.finalRefinement !== undefined && + (obj.finalRefinement = message.finalRefinement + ? FinalRefinement.toJSON(message.finalRefinement) + : undefined); + message.statusCode !== undefined && + (obj.statusCode = message.statusCode + ? StatusCode.toJSON(message.statusCode) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamingResponse { + const message = { ...baseStreamingResponse } as StreamingResponse; + message.sessionUuid = + object.sessionUuid !== undefined && object.sessionUuid !== null + ? SessionUuid.fromPartial(object.sessionUuid) + : undefined; + message.audioCursors = + object.audioCursors !== undefined && object.audioCursors !== null + ? AudioCursors.fromPartial(object.audioCursors) + : undefined; + message.responseWallTimeMs = object.responseWallTimeMs ?? 0; + message.partial = + object.partial !== undefined && object.partial !== null + ? AlternativeUpdate.fromPartial(object.partial) + : undefined; + message.final = + object.final !== undefined && object.final !== null + ? AlternativeUpdate.fromPartial(object.final) + : undefined; + message.eouUpdate = + object.eouUpdate !== undefined && object.eouUpdate !== null + ? EouUpdate.fromPartial(object.eouUpdate) + : undefined; + message.finalRefinement = + object.finalRefinement !== undefined && object.finalRefinement !== null + ? FinalRefinement.fromPartial(object.finalRefinement) + : undefined; + message.statusCode = + object.statusCode !== undefined && object.statusCode !== null + ? 
StatusCode.fromPartial(object.statusCode) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(StreamingResponse.$type, StreamingResponse); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial<T> = T extends Builtin + ? T + : T extends Array<infer U> + ? Array<DeepPartial<U>> + : T extends ReadonlyArray<infer U> + ? ReadonlyArray<DeepPartial<U>> + : T extends {} + ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> } + : Partial<T>; + +type KeysOfUnion<T> = T extends T ? keyof T : never; +export type Exact<P, I extends P> = P extends Builtin + ? P + : P & { [K in keyof P]: Exact<P[K], I[K]> } & Record< + Exclude<keyof I, KeysOfUnion<P> | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts b/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts new file mode 100644 index 00000000..56c6cc74 --- /dev/null +++ b/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts @@ -0,0 +1,73 @@ +/* eslint-disable */ +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleBidiStreamingCall, + Client, + ClientDuplexStream, + CallOptions, + Metadata, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + StreamingRequest, + StreamingResponse, +} from "../../../../../yandex/cloud/ai/stt/v3/stt"; + +export const protobufPackage = "speechkit.stt.v3"; + +/** A set of methods for voice recognition. 
*/ +export const RecognizerService = { + /** Expects audio in real-time */ + recognizeStreaming: { + path: "/speechkit.stt.v3.Recognizer/RecognizeStreaming", + requestStream: true, + responseStream: true, + requestSerialize: (value: StreamingRequest) => + Buffer.from(StreamingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StreamingRequest.decode(value), + responseSerialize: (value: StreamingResponse) => + Buffer.from(StreamingResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => StreamingResponse.decode(value), + }, +} as const; + +export interface RecognizerServer extends UntypedServiceImplementation { + /** Expects audio in real-time */ + recognizeStreaming: handleBidiStreamingCall< + StreamingRequest, + StreamingResponse + >; +} + +export interface RecognizerClient extends Client { + /** Expects audio in real-time */ + recognizeStreaming(): ClientDuplexStream; + recognizeStreaming( + options: Partial + ): ClientDuplexStream; + recognizeStreaming( + metadata: Metadata, + options?: Partial + ): ClientDuplexStream; +} + +export const RecognizerClient = makeGenericClientConstructor( + RecognizerService, + "speechkit.stt.v3.Recognizer" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): RecognizerClient; + service: typeof RecognizerService; +}; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts index 6c0239af..35032682 100644 --- a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts +++ b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts @@ -52,6 +52,8 @@ export interface TranslateRequest { model: string; /** Glossary to be applied for the translation. For more information, see [Glossaries](/docs/translate/concepts/glossary). */ glossaryConfig?: TranslateGlossaryConfig; + /** use speller */ + speller: boolean; } export enum TranslateRequest_Format { @@ -182,6 +184,7 @@ const baseTranslateRequest: object = { texts: "", folderId: "", model: "", + speller: false, }; export const TranslateRequest = { @@ -215,6 +218,9 @@ export const TranslateRequest = { writer.uint32(58).fork() ).ldelim(); } + if (message.speller === true) { + writer.uint32(64).bool(message.speller); + } return writer; }, @@ -250,6 +256,9 @@ export const TranslateRequest = { reader.uint32() ); break; + case 8: + message.speller = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -287,6 +296,10 @@ export const TranslateRequest = { object.glossaryConfig !== undefined && object.glossaryConfig !== null ? TranslateGlossaryConfig.fromJSON(object.glossaryConfig) : undefined; + message.speller = + object.speller !== undefined && object.speller !== null + ? Boolean(object.speller) + : false; return message; }, @@ -309,6 +322,7 @@ export const TranslateRequest = { (obj.glossaryConfig = message.glossaryConfig ? TranslateGlossaryConfig.toJSON(message.glossaryConfig) : undefined); + message.speller !== undefined && (obj.speller = message.speller); return obj; }, @@ -326,6 +340,7 @@ export const TranslateRequest = { object.glossaryConfig !== undefined && object.glossaryConfig !== null ? TranslateGlossaryConfig.fromPartial(object.glossaryConfig) : undefined; + message.speller = object.speller ?? 
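A hedged end-to-end sketch of the Recognizer client defined above. The endpoint stt.api.cloud.yandex.net:443 and the authorization header with an IAM token are assumptions about the SpeechKit v3 service rather than something stated in this patch; openRequest and audioRequest are the helpers sketched earlier, and iamToken/pcmData stand in for values obtained elsewhere.

declare const iamToken: string; // assumed to come from the SDK session or IAM
declare const pcmData: Buffer;  // raw audio matching the declared RawAudio format

const client = new RecognizerClient(
  "stt.api.cloud.yandex.net:443", // assumed endpoint
  ChannelCredentials.createSsl()
);

const metadata = new Metadata();
metadata.set("authorization", `Bearer ${iamToken}`);

const stream = client.recognizeStreaming(metadata);
stream.write(openRequest);           // session options go first
stream.write(audioRequest(pcmData)); // then audio chunks

stream.on("data", (response: StreamingResponse) => {
  if (response.partial?.alternatives.length) {
    console.log("partial:", response.partial.alternatives[0].text);
  }
  if (response.final?.alternatives.length) {
    console.log("final:", response.final.alternatives[0].text);
  }
  if (response.finalRefinement?.normalizedText) {
    console.log("normalized:", response.finalRefinement.normalizedText.alternatives[0]?.text);
  }
});
stream.on("error", (err) => console.error("recognition failed:", err));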
false; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/tts/v3/tts.ts b/src/generated/yandex/cloud/ai/tts/v3/tts.ts index dc8c1b19..8db6f39d 100644 --- a/src/generated/yandex/cloud/ai/tts/v3/tts.ts +++ b/src/generated/yandex/cloud/ai/tts/v3/tts.ts @@ -76,6 +76,7 @@ export enum ContainerAudio_ContainerAudioType { /** WAV - Audio bit depth 16-bit signed little-endian (Linear PCM). */ WAV = 1, OGG_OPUS = 2, + MP3 = 3, UNRECOGNIZED = -1, } @@ -92,6 +93,9 @@ export function containerAudio_ContainerAudioTypeFromJSON( case 2: case "OGG_OPUS": return ContainerAudio_ContainerAudioType.OGG_OPUS; + case 3: + case "MP3": + return ContainerAudio_ContainerAudioType.MP3; case -1: case "UNRECOGNIZED": default: @@ -109,6 +113,8 @@ export function containerAudio_ContainerAudioTypeToJSON( return "WAV"; case ContainerAudio_ContainerAudioType.OGG_OPUS: return "OGG_OPUS"; + case ContainerAudio_ContainerAudioType.MP3: + return "MP3"; default: return "UNKNOWN"; } @@ -180,16 +186,11 @@ export interface Hints { speed: number | undefined; /** hint to regulate volume. For LOUDNESS_NORMALIZATION_TYPE_UNSPECIFIED normalization will use MAX_PEAK, if volume in (0, 1], LUFS if volume in [-145, 0). */ volume: number | undefined; + role: string | undefined; } export interface UtteranceSynthesisRequest { $type: "speechkit.tts.v3.UtteranceSynthesisRequest"; - /** - * The name of the model. - * - * Specifies basic synthesis functionality. Currently should be empty. - */ - model: string; /** Raw text (e.g. "Hello, Alice"). */ text: string | undefined; /** Text template instance, e.g. `{"Hello, {username}" with username="Alice"}`. */ @@ -1084,6 +1085,9 @@ export const Hints = { if (message.volume !== undefined) { writer.uint32(33).double(message.volume); } + if (message.role !== undefined) { + writer.uint32(42).string(message.role); + } return writer; }, @@ -1106,6 +1110,9 @@ export const Hints = { case 4: message.volume = reader.double(); break; + case 5: + message.role = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1132,6 +1139,10 @@ export const Hints = { object.volume !== undefined && object.volume !== null ? Number(object.volume) : undefined; + message.role = + object.role !== undefined && object.role !== null + ? String(object.role) + : undefined; return message; }, @@ -1144,6 +1155,7 @@ export const Hints = { : undefined); message.speed !== undefined && (obj.speed = message.speed); message.volume !== undefined && (obj.volume = message.volume); + message.role !== undefined && (obj.role = message.role); return obj; }, @@ -1156,6 +1168,7 @@ export const Hints = { : undefined; message.speed = object.speed ?? undefined; message.volume = object.volume ?? undefined; + message.role = object.role ?? 
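The speller flag added to TranslateRequest above can be set through the generated fromPartial helper. Illustrative only: targetLanguageCode belongs to the full TranslateRequest message and is not visible in this hunk, and the folder ID is a placeholder.

const request = TranslateRequest.fromPartial({
  folderId: "b1g-example-folder-id", // placeholder
  targetLanguageCode: "en",          // assumed field from the full message
  texts: ["helo wrold"],
  speller: true, // new in this patch: run the speller on the input before translating
});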
undefined; return message; }, }; @@ -1164,7 +1177,6 @@ messageTypeRegistry.set(Hints.$type, Hints); const baseUtteranceSynthesisRequest: object = { $type: "speechkit.tts.v3.UtteranceSynthesisRequest", - model: "", loudnessNormalizationType: 0, unsafeMode: false, }; @@ -1176,9 +1188,6 @@ export const UtteranceSynthesisRequest = { message: UtteranceSynthesisRequest, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.model !== "") { - writer.uint32(10).string(message.model); - } if (message.text !== undefined) { writer.uint32(18).string(message.text); } @@ -1219,9 +1228,6 @@ export const UtteranceSynthesisRequest = { while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { - case 1: - message.model = reader.string(); - break; case 2: message.text = reader.string(); break; @@ -1255,10 +1261,6 @@ export const UtteranceSynthesisRequest = { const message = { ...baseUtteranceSynthesisRequest, } as UtteranceSynthesisRequest; - message.model = - object.model !== undefined && object.model !== null - ? String(object.model) - : ""; message.text = object.text !== undefined && object.text !== null ? String(object.text) @@ -1288,7 +1290,6 @@ export const UtteranceSynthesisRequest = { toJSON(message: UtteranceSynthesisRequest): unknown { const obj: any = {}; - message.model !== undefined && (obj.model = message.model); message.text !== undefined && (obj.text = message.text); message.textTemplate !== undefined && (obj.textTemplate = message.textTemplate @@ -1318,7 +1319,6 @@ export const UtteranceSynthesisRequest = { const message = { ...baseUtteranceSynthesisRequest, } as UtteranceSynthesisRequest; - message.model = object.model ?? ""; message.text = object.text ?? undefined; message.textTemplate = object.textTemplate !== undefined && object.textTemplate !== null diff --git a/src/generated/yandex/cloud/ai/vision/v2/image.ts b/src/generated/yandex/cloud/ai/vision/v2/image.ts new file mode 100644 index 00000000..1f0f05fc --- /dev/null +++ b/src/generated/yandex/cloud/ai/vision/v2/image.ts @@ -0,0 +1,193 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.ai.vision.v2"; + +export interface Image { + $type: "yandex.cloud.ai.vision.v2.Image"; + /** bytes with data */ + content: Buffer | undefined; + /** type of data */ + imageType: Image_ImageType; +} + +/** type of image */ +export enum Image_ImageType { + IMAGE_TYPE_UNSPECIFIED = 0, + JPEG = 1, + PNG = 2, + UNRECOGNIZED = -1, +} + +export function image_ImageTypeFromJSON(object: any): Image_ImageType { + switch (object) { + case 0: + case "IMAGE_TYPE_UNSPECIFIED": + return Image_ImageType.IMAGE_TYPE_UNSPECIFIED; + case 1: + case "JPEG": + return Image_ImageType.JPEG; + case 2: + case "PNG": + return Image_ImageType.PNG; + case -1: + case "UNRECOGNIZED": + default: + return Image_ImageType.UNRECOGNIZED; + } +} + +export function image_ImageTypeToJSON(object: Image_ImageType): string { + switch (object) { + case Image_ImageType.IMAGE_TYPE_UNSPECIFIED: + return "IMAGE_TYPE_UNSPECIFIED"; + case Image_ImageType.JPEG: + return "JPEG"; + case Image_ImageType.PNG: + return "PNG"; + default: + return "UNKNOWN"; + } +} + +const baseImage: object = { + $type: "yandex.cloud.ai.vision.v2.Image", + imageType: 0, +}; + +export const Image = { + $type: "yandex.cloud.ai.vision.v2.Image" as const, + + encode(message: Image, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if 
(message.content !== undefined) { + writer.uint32(10).bytes(message.content); + } + if (message.imageType !== 0) { + writer.uint32(16).int32(message.imageType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Image { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseImage } as Image; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.content = reader.bytes() as Buffer; + break; + case 2: + message.imageType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Image { + const message = { ...baseImage } as Image; + message.content = + object.content !== undefined && object.content !== null + ? Buffer.from(bytesFromBase64(object.content)) + : undefined; + message.imageType = + object.imageType !== undefined && object.imageType !== null + ? image_ImageTypeFromJSON(object.imageType) + : 0; + return message; + }, + + toJSON(message: Image): unknown { + const obj: any = {}; + message.content !== undefined && + (obj.content = + message.content !== undefined + ? base64FromBytes(message.content) + : undefined); + message.imageType !== undefined && + (obj.imageType = image_ImageTypeToJSON(message.imageType)); + return obj; + }, + + fromPartial, I>>(object: I): Image { + const message = { ...baseImage } as Image; + message.content = object.content ?? undefined; + message.imageType = object.imageType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Image.$type, Image); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts b/src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts new file mode 100644 index 00000000..09d0627f --- /dev/null +++ b/src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts @@ -0,0 +1,541 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Image } from "../../../../../yandex/cloud/ai/vision/v2/image"; + +export const protobufPackage = "yandex.cloud.ai.vision.v2"; + +/** Description of single label */ +export interface Label { + $type: "yandex.cloud.ai.vision.v2.Label"; + /** Label name */ + name: string; + /** human readable description of label */ + description: string; +} + +/** Image annotation for specific label */ +export interface ClassAnnotation { + $type: "yandex.cloud.ai.vision.v2.ClassAnnotation"; + /** list of annotated labels */ + label?: Label; + /** confidence for each label */ + confidence: number; +} + +/** Specification of model used for annotation */ +export interface ClassifierSpecification { + $type: "yandex.cloud.ai.vision.v2.ClassifierSpecification"; + /** List of labels, annotated by service */ + labels: Label[]; + /** type of annotation: exclusive (multi-class) or non-exclusive (multi-label) */ + classificationType: ClassifierSpecification_ClassificationType; +} + +export enum ClassifierSpecification_ClassificationType { + CLASSIFICATION_TYPE_UNSPECIFIED = 0, + MULTI_LABEL = 1, + MULTI_CLASS = 2, + UNRECOGNIZED = -1, +} + +export function classifierSpecification_ClassificationTypeFromJSON( + object: any +): ClassifierSpecification_ClassificationType { + switch (object) { + case 0: + case "CLASSIFICATION_TYPE_UNSPECIFIED": + return ClassifierSpecification_ClassificationType.CLASSIFICATION_TYPE_UNSPECIFIED; + case 1: + case "MULTI_LABEL": + return ClassifierSpecification_ClassificationType.MULTI_LABEL; + case 2: + case "MULTI_CLASS": + return ClassifierSpecification_ClassificationType.MULTI_CLASS; + case -1: + case "UNRECOGNIZED": + default: + return ClassifierSpecification_ClassificationType.UNRECOGNIZED; + } +} + +export function classifierSpecification_ClassificationTypeToJSON( + object: ClassifierSpecification_ClassificationType +): string { + switch (object) { + case ClassifierSpecification_ClassificationType.CLASSIFICATION_TYPE_UNSPECIFIED: + return "CLASSIFICATION_TYPE_UNSPECIFIED"; + case ClassifierSpecification_ClassificationType.MULTI_LABEL: + return "MULTI_LABEL"; + case ClassifierSpecification_ClassificationType.MULTI_CLASS: + return "MULTI_CLASS"; + default: + return "UNKNOWN"; + } +} + +/** */ +export interface AnnotationResponse { + $type: "yandex.cloud.ai.vision.v2.AnnotationResponse"; + /** internal service requestId */ + requestId: string; + /** class specification */ + classifierSpecification?: ClassifierSpecification; + /** annotations for each class */ + annotations: ClassAnnotation[]; +} + +/** request for annotation */ +export interface AnnotationRequest { + $type: "yandex.cloud.ai.vision.v2.AnnotationRequest"; + /** image to annotate */ + image?: Image; +} + +const baseLabel: object = { + $type: "yandex.cloud.ai.vision.v2.Label", + name: "", + description: "", +}; + +export const Label = { + $type: "yandex.cloud.ai.vision.v2.Label" as const, + + encode(message: Label, writer: 
_m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.description !== "") { + writer.uint32(18).string(message.description); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Label { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLabel } as Label; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Label { + const message = { ...baseLabel } as Label; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + return message; + }, + + toJSON(message: Label): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + return obj; + }, + + fromPartial, I>>(object: I): Label { + const message = { ...baseLabel } as Label; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Label.$type, Label); + +const baseClassAnnotation: object = { + $type: "yandex.cloud.ai.vision.v2.ClassAnnotation", + confidence: 0, +}; + +export const ClassAnnotation = { + $type: "yandex.cloud.ai.vision.v2.ClassAnnotation" as const, + + encode( + message: ClassAnnotation, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.label !== undefined) { + Label.encode(message.label, writer.uint32(10).fork()).ldelim(); + } + if (message.confidence !== 0) { + writer.uint32(17).double(message.confidence); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClassAnnotation { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClassAnnotation } as ClassAnnotation; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.label = Label.decode(reader, reader.uint32()); + break; + case 2: + message.confidence = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClassAnnotation { + const message = { ...baseClassAnnotation } as ClassAnnotation; + message.label = + object.label !== undefined && object.label !== null + ? Label.fromJSON(object.label) + : undefined; + message.confidence = + object.confidence !== undefined && object.confidence !== null + ? Number(object.confidence) + : 0; + return message; + }, + + toJSON(message: ClassAnnotation): unknown { + const obj: any = {}; + message.label !== undefined && + (obj.label = message.label ? Label.toJSON(message.label) : undefined); + message.confidence !== undefined && (obj.confidence = message.confidence); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClassAnnotation { + const message = { ...baseClassAnnotation } as ClassAnnotation; + message.label = + object.label !== undefined && object.label !== null + ? 
Label.fromPartial(object.label) + : undefined; + message.confidence = object.confidence ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ClassAnnotation.$type, ClassAnnotation); + +const baseClassifierSpecification: object = { + $type: "yandex.cloud.ai.vision.v2.ClassifierSpecification", + classificationType: 0, +}; + +export const ClassifierSpecification = { + $type: "yandex.cloud.ai.vision.v2.ClassifierSpecification" as const, + + encode( + message: ClassifierSpecification, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.labels) { + Label.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.classificationType !== 0) { + writer.uint32(16).int32(message.classificationType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClassifierSpecification { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseClassifierSpecification, + } as ClassifierSpecification; + message.labels = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.labels.push(Label.decode(reader, reader.uint32())); + break; + case 2: + message.classificationType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClassifierSpecification { + const message = { + ...baseClassifierSpecification, + } as ClassifierSpecification; + message.labels = (object.labels ?? []).map((e: any) => Label.fromJSON(e)); + message.classificationType = + object.classificationType !== undefined && + object.classificationType !== null + ? classifierSpecification_ClassificationTypeFromJSON( + object.classificationType + ) + : 0; + return message; + }, + + toJSON(message: ClassifierSpecification): unknown { + const obj: any = {}; + if (message.labels) { + obj.labels = message.labels.map((e) => (e ? Label.toJSON(e) : undefined)); + } else { + obj.labels = []; + } + message.classificationType !== undefined && + (obj.classificationType = + classifierSpecification_ClassificationTypeToJSON( + message.classificationType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClassifierSpecification { + const message = { + ...baseClassifierSpecification, + } as ClassifierSpecification; + message.labels = object.labels?.map((e) => Label.fromPartial(e)) || []; + message.classificationType = object.classificationType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ClassifierSpecification.$type, ClassifierSpecification); + +const baseAnnotationResponse: object = { + $type: "yandex.cloud.ai.vision.v2.AnnotationResponse", + requestId: "", +}; + +export const AnnotationResponse = { + $type: "yandex.cloud.ai.vision.v2.AnnotationResponse" as const, + + encode( + message: AnnotationResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.requestId !== "") { + writer.uint32(10).string(message.requestId); + } + if (message.classifierSpecification !== undefined) { + ClassifierSpecification.encode( + message.classifierSpecification, + writer.uint32(18).fork() + ).ldelim(); + } + for (const v of message.annotations) { + ClassAnnotation.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AnnotationResponse { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAnnotationResponse } as AnnotationResponse; + message.annotations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.requestId = reader.string(); + break; + case 2: + message.classifierSpecification = ClassifierSpecification.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.annotations.push( + ClassAnnotation.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AnnotationResponse { + const message = { ...baseAnnotationResponse } as AnnotationResponse; + message.requestId = + object.requestId !== undefined && object.requestId !== null + ? String(object.requestId) + : ""; + message.classifierSpecification = + object.classifierSpecification !== undefined && + object.classifierSpecification !== null + ? ClassifierSpecification.fromJSON(object.classifierSpecification) + : undefined; + message.annotations = (object.annotations ?? []).map((e: any) => + ClassAnnotation.fromJSON(e) + ); + return message; + }, + + toJSON(message: AnnotationResponse): unknown { + const obj: any = {}; + message.requestId !== undefined && (obj.requestId = message.requestId); + message.classifierSpecification !== undefined && + (obj.classifierSpecification = message.classifierSpecification + ? ClassifierSpecification.toJSON(message.classifierSpecification) + : undefined); + if (message.annotations) { + obj.annotations = message.annotations.map((e) => + e ? ClassAnnotation.toJSON(e) : undefined + ); + } else { + obj.annotations = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): AnnotationResponse { + const message = { ...baseAnnotationResponse } as AnnotationResponse; + message.requestId = object.requestId ?? ""; + message.classifierSpecification = + object.classifierSpecification !== undefined && + object.classifierSpecification !== null + ? ClassifierSpecification.fromPartial(object.classifierSpecification) + : undefined; + message.annotations = + object.annotations?.map((e) => ClassAnnotation.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(AnnotationResponse.$type, AnnotationResponse); + +const baseAnnotationRequest: object = { + $type: "yandex.cloud.ai.vision.v2.AnnotationRequest", +}; + +export const AnnotationRequest = { + $type: "yandex.cloud.ai.vision.v2.AnnotationRequest" as const, + + encode( + message: AnnotationRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.image !== undefined) { + Image.encode(message.image, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AnnotationRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAnnotationRequest } as AnnotationRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.image = Image.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AnnotationRequest { + const message = { ...baseAnnotationRequest } as AnnotationRequest; + message.image = + object.image !== undefined && object.image !== null + ? 
Image.fromJSON(object.image) + : undefined; + return message; + }, + + toJSON(message: AnnotationRequest): unknown { + const obj: any = {}; + message.image !== undefined && + (obj.image = message.image ? Image.toJSON(message.image) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AnnotationRequest { + const message = { ...baseAnnotationRequest } as AnnotationRequest; + message.image = + object.image !== undefined && object.image !== null + ? Image.fromPartial(object.image) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(AnnotationRequest.$type, AnnotationRequest); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts b/src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts new file mode 100644 index 00000000..1d9f8dae --- /dev/null +++ b/src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts @@ -0,0 +1,75 @@ +/* eslint-disable */ +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + AnnotationRequest, + AnnotationResponse, +} from "../../../../../yandex/cloud/ai/vision/v2/image_classifier"; + +export const protobufPackage = "yandex.cloud.ai.vision.v2"; + +export const ImageClassifierServiceService = { + annotate: { + path: "/yandex.cloud.ai.vision.v2.ImageClassifierService/Annotate", + requestStream: false, + responseStream: false, + requestSerialize: (value: AnnotationRequest) => + Buffer.from(AnnotationRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => AnnotationRequest.decode(value), + responseSerialize: (value: AnnotationResponse) => + Buffer.from(AnnotationResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => AnnotationResponse.decode(value), + }, +} as const; + +export interface ImageClassifierServiceServer + extends UntypedServiceImplementation { + annotate: handleUnaryCall; +} + +export interface ImageClassifierServiceClient extends Client { + annotate( + request: AnnotationRequest, + callback: (error: ServiceError | null, response: AnnotationResponse) => void + ): ClientUnaryCall; + annotate( + request: AnnotationRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: AnnotationResponse) => void + ): ClientUnaryCall; + annotate( + request: AnnotationRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: AnnotationResponse) => void + ): ClientUnaryCall; +} + +export const ImageClassifierServiceClient = makeGenericClientConstructor( + ImageClassifierServiceService, + "yandex.cloud.ai.vision.v2.ImageClassifierService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ImageClassifierServiceClient; + service: typeof 
ImageClassifierServiceService; +}; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts index 4803e7bd..43d3bc79 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts @@ -37,16 +37,16 @@ export enum LoadBalancingMode { */ LEAST_REQUEST = 2, /** - * MAGLEV_HASH - Maglev hashing load balancing mode, used only if session affinity is working for the backend group. + * MAGLEV_HASH - Maglev hashing load balancing mode. * * Each endpoint is hashed, and a hash table with 65537 rows is filled accordingly, so that every endpoint occupies - * the same amount of rows. An attribute of each request, specified in session affinity configuration of the backend - * group, is also hashed by the same function. The row with the same number as the resulting value is looked up in the - * table to determine the endpoint that receives the request. + * the same amount of rows. An attribute of each request is also hashed by the same function (if session affinity is + * enabled for the backend group, the attribute to hash is specified in session affinity configuration). The row + * with the same number as the resulting value is looked up in the table to determine the endpoint that receives + * the request. * - * If session affinity is not working for the backend group (i.e. it is not configured or the group contains more - * than one backend with positive weight), endpoints for backends with `MAGLEV_HASH` load balancing mode are picked at - * `RANDOM` instead. + * If the backend group with session affinity enabled contains more than one backend with positive weight, endpoints + * for backends with `MAGLEV_HASH` load balancing mode are picked at `RANDOM` instead. */ MAGLEV_HASH = 3, UNRECOGNIZED = -1, @@ -112,7 +112,7 @@ export interface BackendGroup { http?: HttpBackendGroup | undefined; /** List of gRPC backends that the backend group consists of. */ grpc?: GrpcBackendGroup | undefined; - /** List of stream backends that the backend group consist of. */ + /** List of stream (TCP) backends that the backend group consists of. */ stream?: StreamBackendGroup | undefined; /** Creation timestamp. */ createdAt?: Date; @@ -124,10 +124,16 @@ export interface BackendGroup_LabelsEntry { value: string; } -/** A Stream backend group resource. */ +/** A stream (TCP) backend group resource. */ export interface StreamBackendGroup { $type: "yandex.cloud.apploadbalancer.v1.StreamBackendGroup"; + /** List of stream (TCP) backends. */ backends: StreamBackend[]; + /** + * Connection-based session affinity configuration. + * + * For now, a connection is defined only by an IP address of the client. + */ connection?: ConnectionSessionAffinity | undefined; } @@ -178,10 +184,12 @@ export interface CookieSessionAffinity { /** Name of the cookie that is used for session affinity. */ name: string; /** - * Maximum age of cookies that are generated for sessions (persistent cookies). + * Maximum age of cookies that are generated for sessions. * - * If not set, session cookies are used, which are stored by clients in temporary memory and are deleted + * If set to `0`, session cookies are used, which are stored by clients in temporary memory and are deleted * on client restarts. 
+ * + * If not set, the balancer does not generate cookies and only uses incoming ones for establishing session affinity. */ ttl?: Duration; } @@ -211,7 +219,8 @@ export interface LoadBalancingConfig { panicThreshold: number; /** * Percentage of traffic that a load balancer node sends to healthy backends in its availability zone. - * The rest is divided equally between other zones. For details about zone-aware routing, see [documentation](/docs/application-load-balancer/concepts/backend-group#locality). + * The rest is divided equally between other zones. For details about zone-aware routing, see + * [documentation](/docs/application-load-balancer/concepts/backend-group#locality). * * If there are no healthy backends in an availability zone, all the traffic is divided between other zones. * @@ -237,24 +246,52 @@ export interface LoadBalancingConfig { /** * Load balancing mode for the backend. * - * For detals about load balancing modes, see + * For details about load balancing modes, see * [documentation](/docs/application-load-balancer/concepts/backend-group#balancing-mode). */ mode: LoadBalancingMode; } -/** A stream backend resource. */ +/** A stream (TCP) backend resource. */ export interface StreamBackend { $type: "yandex.cloud.apploadbalancer.v1.StreamBackend"; + /** Name of the backend. */ name: string; - /** If not set, backend will be disabled. */ + /** + * Backend weight. Traffic is distributed between backends of a backend group according to their weights. + * + * Weights must be set either for all backends in a group or for none of them. + * Setting no weights is the same as setting equal non-zero weights for all backends. + * + * If the weight is non-positive, traffic is not sent to the backend. + */ backendWeight?: number; + /** Load balancing configuration for the backend. */ loadBalancingConfig?: LoadBalancingConfig; - /** Optional alternative port for all targets. */ + /** Port used by all targets to receive traffic. */ port: number; + /** + * Target groups that belong to the backend. For details about target groups, see + * [documentation](/docs/application-load-balancer/concepts/target-group). + */ targetGroups?: TargetGroupsBackend | undefined; + /** + * Health checks to perform on targets from target groups. + * For details about health checking, see [documentation](/docs/application-load-balancer/concepts/backend-group#health-checks). + * + * If no health checks are specified, active health checking is not performed. + */ healthchecks: HealthCheck[]; + /** + * Settings for TLS connections between load balancer nodes and backend targets. + * + * If specified, the load balancer establishes TLS-encrypted TCP connections with targets and compares received + * certificates with the one specified in [BackendTls.validation_context]. + * If not specified, the load balancer establishes unencrypted TCP connections with targets. + */ tls?: BackendTls; + /** If set, proxy protocol will be enabled for this backend. */ + enableProxyProtocol: boolean; } /** An HTTP backend resource. 
*/ @@ -359,6 +396,20 @@ export interface TargetGroupsBackend { targetGroupIds: string[]; } +/** Transport settings to be used instead of the settings configured per-cluster */ +export interface PlaintextTransportSettings { + $type: "yandex.cloud.apploadbalancer.v1.PlaintextTransportSettings"; +} + +/** Transport settings to be used instead of the settings configured per-cluster */ +export interface SecureTransportSettings { + $type: "yandex.cloud.apploadbalancer.v1.SecureTransportSettings"; + /** SNI string for TLS connections. */ + sni: string; + /** Validation context for backend TLS connections. */ + validationContext?: ValidationContext; +} + /** A resource for backend TLS settings. */ export interface BackendTls { $type: "yandex.cloud.apploadbalancer.v1.BackendTls"; @@ -427,6 +478,8 @@ export interface HealthCheck { http?: HealthCheck_HttpHealthCheck | undefined; /** gRPC health check settings. */ grpc?: HealthCheck_GrpcHealthCheck | undefined; + plaintext?: PlaintextTransportSettings | undefined; + tls?: SecureTransportSettings | undefined; } /** A resource for TCP stream health check settings. */ @@ -1484,6 +1537,7 @@ const baseStreamBackend: object = { $type: "yandex.cloud.apploadbalancer.v1.StreamBackend", name: "", port: 0, + enableProxyProtocol: false, }; export const StreamBackend = { @@ -1523,6 +1577,9 @@ export const StreamBackend = { if (message.tls !== undefined) { BackendTls.encode(message.tls, writer.uint32(58).fork()).ldelim(); } + if (message.enableProxyProtocol === true) { + writer.uint32(64).bool(message.enableProxyProtocol); + } return writer; }, @@ -1566,6 +1623,9 @@ export const StreamBackend = { case 7: message.tls = BackendTls.decode(reader, reader.uint32()); break; + case 8: + message.enableProxyProtocol = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1604,6 +1664,11 @@ export const StreamBackend = { object.tls !== undefined && object.tls !== null ? BackendTls.fromJSON(object.tls) : undefined; + message.enableProxyProtocol = + object.enableProxyProtocol !== undefined && + object.enableProxyProtocol !== null + ? Boolean(object.enableProxyProtocol) + : false; return message; }, @@ -1630,6 +1695,8 @@ export const StreamBackend = { } message.tls !== undefined && (obj.tls = message.tls ? BackendTls.toJSON(message.tls) : undefined); + message.enableProxyProtocol !== undefined && + (obj.enableProxyProtocol = message.enableProxyProtocol); return obj; }, @@ -1655,6 +1722,7 @@ export const StreamBackend = { object.tls !== undefined && object.tls !== null ? BackendTls.fromPartial(object.tls) : undefined; + message.enableProxyProtocol = object.enableProxyProtocol ?? false; return message; }, }; @@ -2126,6 +2194,162 @@ export const TargetGroupsBackend = { messageTypeRegistry.set(TargetGroupsBackend.$type, TargetGroupsBackend); +const basePlaintextTransportSettings: object = { + $type: "yandex.cloud.apploadbalancer.v1.PlaintextTransportSettings", +}; + +export const PlaintextTransportSettings = { + $type: "yandex.cloud.apploadbalancer.v1.PlaintextTransportSettings" as const, + + encode( + _: PlaintextTransportSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PlaintextTransportSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePlaintextTransportSettings, + } as PlaintextTransportSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): PlaintextTransportSettings { + const message = { + ...basePlaintextTransportSettings, + } as PlaintextTransportSettings; + return message; + }, + + toJSON(_: PlaintextTransportSettings): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): PlaintextTransportSettings { + const message = { + ...basePlaintextTransportSettings, + } as PlaintextTransportSettings; + return message; + }, +}; + +messageTypeRegistry.set( + PlaintextTransportSettings.$type, + PlaintextTransportSettings +); + +const baseSecureTransportSettings: object = { + $type: "yandex.cloud.apploadbalancer.v1.SecureTransportSettings", + sni: "", +}; + +export const SecureTransportSettings = { + $type: "yandex.cloud.apploadbalancer.v1.SecureTransportSettings" as const, + + encode( + message: SecureTransportSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sni !== "") { + writer.uint32(10).string(message.sni); + } + if (message.validationContext !== undefined) { + ValidationContext.encode( + message.validationContext, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SecureTransportSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSecureTransportSettings, + } as SecureTransportSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sni = reader.string(); + break; + case 3: + message.validationContext = ValidationContext.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SecureTransportSettings { + const message = { + ...baseSecureTransportSettings, + } as SecureTransportSettings; + message.sni = + object.sni !== undefined && object.sni !== null ? String(object.sni) : ""; + message.validationContext = + object.validationContext !== undefined && + object.validationContext !== null + ? ValidationContext.fromJSON(object.validationContext) + : undefined; + return message; + }, + + toJSON(message: SecureTransportSettings): unknown { + const obj: any = {}; + message.sni !== undefined && (obj.sni = message.sni); + message.validationContext !== undefined && + (obj.validationContext = message.validationContext + ? ValidationContext.toJSON(message.validationContext) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SecureTransportSettings { + const message = { + ...baseSecureTransportSettings, + } as SecureTransportSettings; + message.sni = object.sni ?? ""; + message.validationContext = + object.validationContext !== undefined && + object.validationContext !== null + ? 
ValidationContext.fromPartial(object.validationContext) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(SecureTransportSettings.$type, SecureTransportSettings); + const baseBackendTls: object = { $type: "yandex.cloud.apploadbalancer.v1.BackendTls", sni: "", @@ -2328,6 +2552,18 @@ export const HealthCheck = { writer.uint32(74).fork() ).ldelim(); } + if (message.plaintext !== undefined) { + PlaintextTransportSettings.encode( + message.plaintext, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.tls !== undefined) { + SecureTransportSettings.encode( + message.tls, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -2374,6 +2610,15 @@ export const HealthCheck = { reader.uint32() ); break; + case 10: + message.plaintext = PlaintextTransportSettings.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.tls = SecureTransportSettings.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -2422,6 +2667,14 @@ export const HealthCheck = { object.grpc !== undefined && object.grpc !== null ? HealthCheck_GrpcHealthCheck.fromJSON(object.grpc) : undefined; + message.plaintext = + object.plaintext !== undefined && object.plaintext !== null + ? PlaintextTransportSettings.fromJSON(object.plaintext) + : undefined; + message.tls = + object.tls !== undefined && object.tls !== null + ? SecureTransportSettings.fromJSON(object.tls) + : undefined; return message; }, @@ -2455,6 +2708,14 @@ export const HealthCheck = { (obj.grpc = message.grpc ? HealthCheck_GrpcHealthCheck.toJSON(message.grpc) : undefined); + message.plaintext !== undefined && + (obj.plaintext = message.plaintext + ? PlaintextTransportSettings.toJSON(message.plaintext) + : undefined); + message.tls !== undefined && + (obj.tls = message.tls + ? SecureTransportSettings.toJSON(message.tls) + : undefined); return obj; }, @@ -2486,6 +2747,14 @@ export const HealthCheck = { object.grpc !== undefined && object.grpc !== null ? HealthCheck_GrpcHealthCheck.fromPartial(object.grpc) : undefined; + message.plaintext = + object.plaintext !== undefined && object.plaintext !== null + ? PlaintextTransportSettings.fromPartial(object.plaintext) + : undefined; + message.tls = + object.tls !== undefined && object.tls !== null + ? SecureTransportSettings.fromPartial(object.tls) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts index da092846..a7add978 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts @@ -19,6 +19,7 @@ import { BackendGroup, HttpBackendGroup, GrpcBackendGroup, + StreamBackendGroup, HttpBackend, GrpcBackend, StreamBackend, @@ -131,6 +132,8 @@ export interface UpdateBackendGroupRequest { http?: HttpBackendGroup | undefined; /** New list of gRPC backends that the backend group will consist of. */ grpc?: GrpcBackendGroup | undefined; + /** New list of stream (TCP) backends that the backend group will consist of. */ + stream?: StreamBackendGroup | undefined; } export interface UpdateBackendGroupRequest_LabelsEntry { @@ -169,6 +172,8 @@ export interface CreateBackendGroupRequest { http?: HttpBackendGroup | undefined; /** List of gRPC backends that the backend group consists of. */ grpc?: GrpcBackendGroup | undefined; + /** List of stream (TCP) backends that the backend group consists of. 
*/ + stream?: StreamBackendGroup | undefined; } export interface CreateBackendGroupRequest_LabelsEntry { @@ -217,7 +222,7 @@ export interface UpdateBackendRequest { http?: HttpBackend | undefined; /** New settings for the gRPC backend. */ grpc?: GrpcBackend | undefined; - /** New settings for the Stream backend. */ + /** New settings for the stream (TCP) backend. */ stream?: StreamBackend | undefined; } @@ -758,6 +763,12 @@ export const UpdateBackendGroupRequest = { if (message.grpc !== undefined) { GrpcBackendGroup.encode(message.grpc, writer.uint32(58).fork()).ldelim(); } + if (message.stream !== undefined) { + StreamBackendGroup.encode( + message.stream, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -801,6 +812,9 @@ export const UpdateBackendGroupRequest = { case 7: message.grpc = GrpcBackendGroup.decode(reader, reader.uint32()); break; + case 8: + message.stream = StreamBackendGroup.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -843,6 +857,10 @@ export const UpdateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromJSON(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? StreamBackendGroup.fromJSON(object.stream) + : undefined; return message; }, @@ -871,6 +889,10 @@ export const UpdateBackendGroupRequest = { (obj.grpc = message.grpc ? GrpcBackendGroup.toJSON(message.grpc) : undefined); + message.stream !== undefined && + (obj.stream = message.stream + ? StreamBackendGroup.toJSON(message.stream) + : undefined); return obj; }, @@ -903,6 +925,10 @@ export const UpdateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromPartial(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? StreamBackendGroup.fromPartial(object.stream) + : undefined; return message; }, }; @@ -1114,6 +1140,12 @@ export const CreateBackendGroupRequest = { if (message.grpc !== undefined) { GrpcBackendGroup.encode(message.grpc, writer.uint32(50).fork()).ldelim(); } + if (message.stream !== undefined) { + StreamBackendGroup.encode( + message.stream, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -1154,6 +1186,9 @@ export const CreateBackendGroupRequest = { case 6: message.grpc = GrpcBackendGroup.decode(reader, reader.uint32()); break; + case 7: + message.stream = StreamBackendGroup.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1192,6 +1227,10 @@ export const CreateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromJSON(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? StreamBackendGroup.fromJSON(object.stream) + : undefined; return message; }, @@ -1215,6 +1254,10 @@ export const CreateBackendGroupRequest = { (obj.grpc = message.grpc ? GrpcBackendGroup.toJSON(message.grpc) : undefined); + message.stream !== undefined && + (obj.stream = message.stream + ? StreamBackendGroup.toJSON(message.stream) + : undefined); return obj; }, @@ -1243,6 +1286,10 @@ export const CreateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromPartial(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? 
StreamBackendGroup.fromPartial(object.stream) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts index 09180897..7811394d 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts @@ -227,11 +227,17 @@ export interface Listener { * Endpoints are defined by their IP addresses and ports. */ endpoints: Endpoint[]; - /** HTTP listener settings. */ + /** Unencrypted HTTP listener settings. */ http?: HttpListener | undefined; - /** HTTPS (HTTP over TLS) listener settings. */ + /** + * TLS-encrypted HTTP or TCP stream listener settings. + * + * All handlers within a listener ([TlsListener.default_handler] and [TlsListener.sni_handlers]) must be of one + * type, [HttpHandler] or [StreamHandler]. Mixing HTTP and TCP stream traffic in a TLS-encrypted listener is not + * supported. + */ tls?: TlsListener | undefined; - /** Stream listener settings. */ + /** Unencrypted stream (TCP) listener settings. */ stream?: StreamListener | undefined; } @@ -261,7 +267,7 @@ export interface HttpListener { redirects?: Redirects; } -/** An HTTPS (HTTP over TLS) listener resource. */ +/** TLS-encrypted (HTTP or TCP stream) listener resource. */ export interface TlsListener { $type: "yandex.cloud.apploadbalancer.v1.TlsListener"; /** @@ -276,9 +282,10 @@ export interface TlsListener { sniHandlers: SniMatch[]; } -/** A Stream listener resource. */ +/** A stream (TCP) listener resource. */ export interface StreamListener { $type: "yandex.cloud.apploadbalancer.v1.StreamListener"; + /** Settings for handling stream (TCP) requests. */ handler?: StreamHandler; } @@ -289,9 +296,17 @@ export interface Http2Options { maxConcurrentStreams: number; } -/** A stream handler resource. */ +/** A stream (TCP) handler resource. */ export interface StreamHandler { $type: "yandex.cloud.apploadbalancer.v1.StreamHandler"; + /** + * ID of the backend group processing requests. For details about the concept, see + * [documentation](/docs/application-load-balancer/concepts/backend-group). + * + * The backend group type, specified via [BackendGroup.backend], must be `stream`. + * + * To get the list of all available backend groups, make a [BackendGroupService.List] request. + */ backendGroupId: string; } @@ -299,9 +314,10 @@ export interface StreamHandler { export interface HttpHandler { $type: "yandex.cloud.apploadbalancer.v1.HttpHandler"; /** - * ID of the HTTP router processing requests. + * ID of the HTTP router processing requests. For details about the concept, see + * [documentation](/docs/application-load-balancer/concepts/http-router). * - * For details about the concept, see [documentation](/docs/application-load-balancer/concepts/http-router). + * To get the list of all available HTTP routers, make a [HttpRouterService.List] request. */ httpRouterId: string; /** @@ -337,12 +353,12 @@ export interface SniMatch { handler?: TlsHandler; } -/** An HTTPS (HTTP over TLS) handler resource. */ +/** A TLS-encrypted (HTTP or TCP stream) handler resource. */ export interface TlsHandler { $type: "yandex.cloud.apploadbalancer.v1.TlsHandler"; /** HTTP handler. */ httpHandler?: HttpHandler | undefined; - /** Stream handler */ + /** Stream (TCP) handler. */ streamHandler?: StreamHandler | undefined; /** * ID's of the TLS server certificates from [Certificate Manager](/docs/certificate-manager/). 
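The apploadbalancer hunks above introduce stream (TCP) support across the generated types: StreamBackendGroup and StreamBackend in backend_group.ts, the new stream branch in the create/update backend group requests, the enableProxyProtocol flag, and the per-health-check plaintext/TLS transport settings. A minimal usage sketch follows, assuming the generated module is importable from the published package; the import path, folder ID, and target group ID are placeholders and are not taken from this patch.

// Sketch only: builds a CreateBackendGroupRequest for a stream (TCP) backend group
// using the regenerated ts-proto helpers. The import path and all IDs below are
// hypothetical placeholders.
import { CreateBackendGroupRequest } from '@yandex-cloud/nodejs-sdk/dist/generated/yandex/cloud/apploadbalancer/v1/backend_group_service';

const request = CreateBackendGroupRequest.fromPartial({
  folderId: 'b1g-example-folder',            // placeholder folder ID
  name: 'tcp-backend-group',
  stream: {                                  // StreamBackendGroup branch added in this patch
    backends: [
      {
        name: 'postgres-backend',
        port: 5432,                          // port used by all targets to receive traffic
        targetGroups: { targetGroupIds: ['ds7-example-target-group'] },
        enableProxyProtocol: false,          // new field introduced by these hunks
      },
    ],
  },
});

Sending the request would then go through the generated BackendGroupServiceClient, or the SDK's session-based service wrapper around it, which is outside the scope of this sketch.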
diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts index 36446b17..2c45bc1c 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts @@ -380,11 +380,17 @@ export interface ListenerSpec { * Endpoints are defined by their IP addresses and ports. */ endpointSpecs: EndpointSpec[]; - /** HTTP listener settings. */ + /** Unencrypted HTTP listener settings. */ http?: HttpListener | undefined; - /** TLS listener settings. */ + /** + * TLS-encrypted HTTP or TCP stream listener settings. + * + * All handlers within a listener ([TlsListener.default_handler] and [TlsListener.sni_handlers]) must be of one + * type, [HttpHandler] or [StreamHandler]. Mixing HTTP and TCP stream traffic in a TLS-encrypted listener is not + * supported. + */ tls?: TlsListener | undefined; - /** TCP listener settings. */ + /** Unencrypted stream (TCP) listener settings. */ stream?: StreamListener | undefined; } diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts index ee8c69d0..685be7ae 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts @@ -358,7 +358,11 @@ export function grpcStatusResponseAction_StatusToJSON( /** An HTTP route action resource. */ export interface HttpRouteAction { $type: "yandex.cloud.apploadbalancer.v1.HttpRouteAction"; - /** Backend group to forward requests to. */ + /** + * Backend group to forward requests to. + * + * Stream (TCP) backend groups are not supported. + */ backendGroupId: string; /** * Overall timeout for an HTTP connection between a load balancer node an a backend from the backend group: diff --git a/src/generated/yandex/cloud/cdn/index.ts b/src/generated/yandex/cloud/cdn/index.ts index e82aaf81..b5ae2427 100644 --- a/src/generated/yandex/cloud/cdn/index.ts +++ b/src/generated/yandex/cloud/cdn/index.ts @@ -4,5 +4,7 @@ export * as origin_group from './v1/origin_group' export * as origin_group_service from './v1/origin_group_service' export * as origin_service from './v1/origin_service' export * as provider_service from './v1/provider_service' +export * as raw_logs from './v1/raw_logs' +export * as raw_logs_service from './v1/raw_logs_service' export * as resource from './v1/resource' export * as resource_service from './v1/resource_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/cdn/v1/cache_service.ts b/src/generated/yandex/cloud/cdn/v1/cache_service.ts index f37bc77a..b2e9d335 100644 --- a/src/generated/yandex/cloud/cdn/v1/cache_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/cache_service.ts @@ -339,7 +339,11 @@ messageTypeRegistry.set(PrefetchCacheMetadata.$type, PrefetchCacheMetadata); /** A set of methods for managing Cache Service resources. */ export const CacheServiceService = { - /** Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). */ + /** + * Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). + * + * Purging may take up to 15 minutes. 
+ */ purge: { path: "/yandex.cloud.cdn.v1.CacheService/Purge", requestStream: false, @@ -366,14 +370,22 @@ export const CacheServiceService = { } as const; export interface CacheServiceServer extends UntypedServiceImplementation { - /** Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). */ + /** + * Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). + * + * Purging may take up to 15 minutes. + */ purge: handleUnaryCall; /** Uploads specified files from origins to cache of the specified resource. For defails about prefetching, see [documentation](/docs/cdn/concepts/caching#prefetch). */ prefetch: handleUnaryCall; } export interface CacheServiceClient extends Client { - /** Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). */ + /** + * Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). + * + * Purging may take up to 15 minutes. + */ purge( request: PurgeCacheRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts b/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts index a1918295..d8649684 100644 --- a/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts @@ -1022,7 +1022,12 @@ export const OriginGroupServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates origin group. */ + /** + * Updates the specified origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin group via a [CacheService.Purge] request. + */ update: { path: "/yandex.cloud.cdn.v1.OriginGroupService/Update", requestStream: false, @@ -1057,7 +1062,12 @@ export interface OriginGroupServiceServer extends UntypedServiceImplementation { list: handleUnaryCall; /** Creates origin group. */ create: handleUnaryCall; - /** Updates origin group. */ + /** + * Updates the specified origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin group via a [CacheService.Purge] request. + */ update: handleUnaryCall; /** Deletes origin group with specified origin group id. */ delete: handleUnaryCall; @@ -1121,7 +1131,12 @@ export interface OriginGroupServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates origin group. */ + /** + * Updates the specified origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin group via a [CacheService.Purge] request. 
+ */ update( request: UpdateOriginGroupRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/cdn/v1/origin_service.ts b/src/generated/yandex/cloud/cdn/v1/origin_service.ts index d3a9e990..c0233d4b 100644 --- a/src/generated/yandex/cloud/cdn/v1/origin_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/origin_service.ts @@ -967,7 +967,12 @@ export const OriginServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates origin from origin group. */ + /** + * Updates the specified origin from the origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin via a [CacheService.Purge] request. + */ update: { path: "/yandex.cloud.cdn.v1.OriginService/Update", requestStream: false, @@ -1000,7 +1005,12 @@ export interface OriginServiceServer extends UntypedServiceImplementation { list: handleUnaryCall; /** Creates origin inside origin group. */ create: handleUnaryCall; - /** Updates origin from origin group. */ + /** + * Updates the specified origin from the origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin via a [CacheService.Purge] request. + */ update: handleUnaryCall; /** Deletes origin from origin group. */ delete: handleUnaryCall; @@ -1064,7 +1074,12 @@ export interface OriginServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates origin from origin group. */ + /** + * Updates the specified origin from the origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin via a [CacheService.Purge] request. + */ update( request: UpdateOriginRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/cdn/v1/raw_logs.ts b/src/generated/yandex/cloud/cdn/v1/raw_logs.ts new file mode 100644 index 00000000..f45f2616 --- /dev/null +++ b/src/generated/yandex/cloud/cdn/v1/raw_logs.ts @@ -0,0 +1,193 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.cdn.v1"; + +/** Provider side statuses of Raw logs processing. */ +export enum RawLogsStatus { + RAW_LOGS_STATUS_UNSPECIFIED = 0, + /** RAW_LOGS_STATUS_NOT_ACTIVATED - Raw logs wasn't activated. */ + RAW_LOGS_STATUS_NOT_ACTIVATED = 1, + /** RAW_LOGS_STATUS_OK - Raw logs was activated, and logs storing process works as expected. */ + RAW_LOGS_STATUS_OK = 2, + /** RAW_LOGS_STATUS_FAILED - Raw logs was activated, but logs CDN provider has been failed to store logs. 
*/ + RAW_LOGS_STATUS_FAILED = 3, + UNRECOGNIZED = -1, +} + +export function rawLogsStatusFromJSON(object: any): RawLogsStatus { + switch (object) { + case 0: + case "RAW_LOGS_STATUS_UNSPECIFIED": + return RawLogsStatus.RAW_LOGS_STATUS_UNSPECIFIED; + case 1: + case "RAW_LOGS_STATUS_NOT_ACTIVATED": + return RawLogsStatus.RAW_LOGS_STATUS_NOT_ACTIVATED; + case 2: + case "RAW_LOGS_STATUS_OK": + return RawLogsStatus.RAW_LOGS_STATUS_OK; + case 3: + case "RAW_LOGS_STATUS_FAILED": + return RawLogsStatus.RAW_LOGS_STATUS_FAILED; + case -1: + case "UNRECOGNIZED": + default: + return RawLogsStatus.UNRECOGNIZED; + } +} + +export function rawLogsStatusToJSON(object: RawLogsStatus): string { + switch (object) { + case RawLogsStatus.RAW_LOGS_STATUS_UNSPECIFIED: + return "RAW_LOGS_STATUS_UNSPECIFIED"; + case RawLogsStatus.RAW_LOGS_STATUS_NOT_ACTIVATED: + return "RAW_LOGS_STATUS_NOT_ACTIVATED"; + case RawLogsStatus.RAW_LOGS_STATUS_OK: + return "RAW_LOGS_STATUS_OK"; + case RawLogsStatus.RAW_LOGS_STATUS_FAILED: + return "RAW_LOGS_STATUS_FAILED"; + default: + return "UNKNOWN"; + } +} + +/** User settings for Raw logs. */ +export interface RawLogsSettings { + $type: "yandex.cloud.cdn.v1.RawLogsSettings"; + /** Destination S3 bucket name, note that the suer should be owner of the bucket. */ + bucketName: string; + /** Bucket region, unused for now, could be blank. */ + bucketRegion: string; + /** + * file_prefix: prefix each log object name with specified prefix. + * + * The prefix makes it simpler for you to locate the log objects. + * For example, if you specify the prefix value logs/, each log object that + * S3 creates begins with the logs/ prefix in its key, so pseudo S3 folders + * could be setup. + */ + filePrefix: string; +} + +const baseRawLogsSettings: object = { + $type: "yandex.cloud.cdn.v1.RawLogsSettings", + bucketName: "", + bucketRegion: "", + filePrefix: "", +}; + +export const RawLogsSettings = { + $type: "yandex.cloud.cdn.v1.RawLogsSettings" as const, + + encode( + message: RawLogsSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketName !== "") { + writer.uint32(10).string(message.bucketName); + } + if (message.bucketRegion !== "") { + writer.uint32(18).string(message.bucketRegion); + } + if (message.filePrefix !== "") { + writer.uint32(26).string(message.filePrefix); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RawLogsSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRawLogsSettings } as RawLogsSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketName = reader.string(); + break; + case 2: + message.bucketRegion = reader.string(); + break; + case 3: + message.filePrefix = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RawLogsSettings { + const message = { ...baseRawLogsSettings } as RawLogsSettings; + message.bucketName = + object.bucketName !== undefined && object.bucketName !== null + ? String(object.bucketName) + : ""; + message.bucketRegion = + object.bucketRegion !== undefined && object.bucketRegion !== null + ? String(object.bucketRegion) + : ""; + message.filePrefix = + object.filePrefix !== undefined && object.filePrefix !== null + ? 
String(object.filePrefix) + : ""; + return message; + }, + + toJSON(message: RawLogsSettings): unknown { + const obj: any = {}; + message.bucketName !== undefined && (obj.bucketName = message.bucketName); + message.bucketRegion !== undefined && + (obj.bucketRegion = message.bucketRegion); + message.filePrefix !== undefined && (obj.filePrefix = message.filePrefix); + return obj; + }, + + fromPartial, I>>( + object: I + ): RawLogsSettings { + const message = { ...baseRawLogsSettings } as RawLogsSettings; + message.bucketName = object.bucketName ?? ""; + message.bucketRegion = object.bucketRegion ?? ""; + message.filePrefix = object.filePrefix ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RawLogsSettings.$type, RawLogsSettings); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts b/src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts new file mode 100644 index 00000000..76b7dc9a --- /dev/null +++ b/src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts @@ -0,0 +1,1039 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + RawLogsSettings, + RawLogsStatus, + rawLogsStatusFromJSON, + rawLogsStatusToJSON, +} from "../../../../yandex/cloud/cdn/v1/raw_logs"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.cdn.v1"; + +export interface ActivateRawLogsRequest { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsRequest"; + /** ID of CDN resource to switch logs storage for.. */ + resourceId: string; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface ActivateRawLogsMetadata { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsMetadata"; + /** ID of resource with activated raw logs. */ + resourceId: string; +} + +export interface ActivateRawLogsResponse { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsResponse"; + /** Raw logs status. */ + status: RawLogsStatus; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface DeactivateRawLogsRequest { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsRequest"; + /** ID of CDN resource to deactivate Raw Logs for. */ + resourceId: string; +} + +export interface DeactivateRawLogsMetadata { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsMetadata"; + /** ID of CDN resource. */ + resourceId: string; +} + +export interface GetRawLogsRequest { + $type: "yandex.cloud.cdn.v1.GetRawLogsRequest"; + /** ID of CDN resource to request status and settings. */ + resourceId: string; +} + +export interface GetRawLogsResponse { + $type: "yandex.cloud.cdn.v1.GetRawLogsResponse"; + /** Raw logs status. 
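+   *
+   * Illustrative check (a sketch only; the client variable is an assumption, the request helper is the
+   * generated GetRawLogsRequest.fromPartial defined below):
+   *   rawLogsClient.get(GetRawLogsRequest.fromPartial({ resourceId: "<CDN resource ID>" }), (err, res) => {
+   *     if (!err) { const ok = res.status === RawLogsStatus.RAW_LOGS_STATUS_OK; }
+   *   });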
*/ + status: RawLogsStatus; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface UpdateRawLogsRequest { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsRequest"; + /** ID of CDN resource. */ + resourceId: string; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface UpdateRawLogsResponse { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsResponse"; + /** Raw logs status. */ + status: RawLogsStatus; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface UpdateRawLogsMetadata { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsMetadata"; + /** ID of CDN resource. */ + resourceId: string; +} + +const baseActivateRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsRequest", + resourceId: "", +}; + +export const ActivateRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsRequest" as const, + + encode( + message: ActivateRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseActivateRawLogsRequest } as ActivateRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateRawLogsRequest { + const message = { ...baseActivateRawLogsRequest } as ActivateRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: ActivateRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateRawLogsRequest { + const message = { ...baseActivateRawLogsRequest } as ActivateRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ActivateRawLogsRequest.$type, ActivateRawLogsRequest); + +const baseActivateRawLogsMetadata: object = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsMetadata", + resourceId: "", +}; + +export const ActivateRawLogsMetadata = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsMetadata" as const, + + encode( + message: ActivateRawLogsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateRawLogsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseActivateRawLogsMetadata, + } as ActivateRawLogsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateRawLogsMetadata { + const message = { + ...baseActivateRawLogsMetadata, + } as ActivateRawLogsMetadata; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: ActivateRawLogsMetadata): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateRawLogsMetadata { + const message = { + ...baseActivateRawLogsMetadata, + } as ActivateRawLogsMetadata; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ActivateRawLogsMetadata.$type, ActivateRawLogsMetadata); + +const baseActivateRawLogsResponse: object = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsResponse", + status: 0, +}; + +export const ActivateRawLogsResponse = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsResponse" as const, + + encode( + message: ActivateRawLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(8).int32(message.status); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateRawLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseActivateRawLogsResponse, + } as ActivateRawLogsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32() as any; + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateRawLogsResponse { + const message = { + ...baseActivateRawLogsResponse, + } as ActivateRawLogsResponse; + message.status = + object.status !== undefined && object.status !== null + ? rawLogsStatusFromJSON(object.status) + : 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: ActivateRawLogsResponse): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = rawLogsStatusToJSON(message.status)); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateRawLogsResponse { + const message = { + ...baseActivateRawLogsResponse, + } as ActivateRawLogsResponse; + message.status = object.status ?? 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ActivateRawLogsResponse.$type, ActivateRawLogsResponse); + +const baseDeactivateRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsRequest", + resourceId: "", +}; + +export const DeactivateRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsRequest" as const, + + encode( + message: DeactivateRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeactivateRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeactivateRawLogsRequest, + } as DeactivateRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeactivateRawLogsRequest { + const message = { + ...baseDeactivateRawLogsRequest, + } as DeactivateRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: DeactivateRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeactivateRawLogsRequest { + const message = { + ...baseDeactivateRawLogsRequest, + } as DeactivateRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeactivateRawLogsRequest.$type, + DeactivateRawLogsRequest +); + +const baseDeactivateRawLogsMetadata: object = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsMetadata", + resourceId: "", +}; + +export const DeactivateRawLogsMetadata = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsMetadata" as const, + + encode( + message: DeactivateRawLogsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeactivateRawLogsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeactivateRawLogsMetadata, + } as DeactivateRawLogsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeactivateRawLogsMetadata { + const message = { + ...baseDeactivateRawLogsMetadata, + } as DeactivateRawLogsMetadata; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: DeactivateRawLogsMetadata): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeactivateRawLogsMetadata { + const message = { + ...baseDeactivateRawLogsMetadata, + } as DeactivateRawLogsMetadata; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeactivateRawLogsMetadata.$type, + DeactivateRawLogsMetadata +); + +const baseGetRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.GetRawLogsRequest", + resourceId: "", +}; + +export const GetRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.GetRawLogsRequest" as const, + + encode( + message: GetRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetRawLogsRequest } as GetRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetRawLogsRequest { + const message = { ...baseGetRawLogsRequest } as GetRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: GetRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetRawLogsRequest { + const message = { ...baseGetRawLogsRequest } as GetRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetRawLogsRequest.$type, GetRawLogsRequest); + +const baseGetRawLogsResponse: object = { + $type: "yandex.cloud.cdn.v1.GetRawLogsResponse", + status: 0, +}; + +export const GetRawLogsResponse = { + $type: "yandex.cloud.cdn.v1.GetRawLogsResponse" as const, + + encode( + message: GetRawLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(8).int32(message.status); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetRawLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetRawLogsResponse } as GetRawLogsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32() as any; + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetRawLogsResponse { + const message = { ...baseGetRawLogsResponse } as GetRawLogsResponse; + message.status = + object.status !== undefined && object.status !== null + ? rawLogsStatusFromJSON(object.status) + : 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: GetRawLogsResponse): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = rawLogsStatusToJSON(message.status)); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetRawLogsResponse { + const message = { ...baseGetRawLogsResponse } as GetRawLogsResponse; + message.status = object.status ?? 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GetRawLogsResponse.$type, GetRawLogsResponse); + +const baseUpdateRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsRequest", + resourceId: "", +}; + +export const UpdateRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsRequest" as const, + + encode( + message: UpdateRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateRawLogsRequest } as UpdateRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateRawLogsRequest { + const message = { ...baseUpdateRawLogsRequest } as UpdateRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: UpdateRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.settings !== undefined && + (obj.settings = message.settings + ? 
RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateRawLogsRequest { + const message = { ...baseUpdateRawLogsRequest } as UpdateRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateRawLogsRequest.$type, UpdateRawLogsRequest); + +const baseUpdateRawLogsResponse: object = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsResponse", + status: 0, +}; + +export const UpdateRawLogsResponse = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsResponse" as const, + + encode( + message: UpdateRawLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(8).int32(message.status); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateRawLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateRawLogsResponse } as UpdateRawLogsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32() as any; + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateRawLogsResponse { + const message = { ...baseUpdateRawLogsResponse } as UpdateRawLogsResponse; + message.status = + object.status !== undefined && object.status !== null + ? rawLogsStatusFromJSON(object.status) + : 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: UpdateRawLogsResponse): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = rawLogsStatusToJSON(message.status)); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateRawLogsResponse { + const message = { ...baseUpdateRawLogsResponse } as UpdateRawLogsResponse; + message.status = object.status ?? 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateRawLogsResponse.$type, UpdateRawLogsResponse); + +const baseUpdateRawLogsMetadata: object = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsMetadata", + resourceId: "", +}; + +export const UpdateRawLogsMetadata = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsMetadata" as const, + + encode( + message: UpdateRawLogsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateRawLogsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateRawLogsMetadata } as UpdateRawLogsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateRawLogsMetadata { + const message = { ...baseUpdateRawLogsMetadata } as UpdateRawLogsMetadata; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: UpdateRawLogsMetadata): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateRawLogsMetadata { + const message = { ...baseUpdateRawLogsMetadata } as UpdateRawLogsMetadata; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateRawLogsMetadata.$type, UpdateRawLogsMetadata); + +export const RawLogsServiceService = { + activate: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Activate", + requestStream: false, + responseStream: false, + requestSerialize: (value: ActivateRawLogsRequest) => + Buffer.from(ActivateRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ActivateRawLogsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + deactivate: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Deactivate", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeactivateRawLogsRequest) => + Buffer.from(DeactivateRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeactivateRawLogsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + get: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetRawLogsRequest) => + Buffer.from(GetRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetRawLogsRequest.decode(value), + responseSerialize: (value: GetRawLogsResponse) => + Buffer.from(GetRawLogsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => GetRawLogsResponse.decode(value), + }, + update: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateRawLogsRequest) => + Buffer.from(UpdateRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateRawLogsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface RawLogsServiceServer extends UntypedServiceImplementation { + activate: handleUnaryCall; + deactivate: handleUnaryCall; + get: handleUnaryCall; + update: handleUnaryCall; +} + +export interface RawLogsServiceClient extends Client { + activate( + request: ActivateRawLogsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + activate( + request: ActivateRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, 
response: Operation) => void + ): ClientUnaryCall; + activate( + request: ActivateRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deactivate( + request: DeactivateRawLogsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deactivate( + request: DeactivateRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deactivate( + request: DeactivateRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + get( + request: GetRawLogsRequest, + callback: (error: ServiceError | null, response: GetRawLogsResponse) => void + ): ClientUnaryCall; + get( + request: GetRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: GetRawLogsResponse) => void + ): ClientUnaryCall; + get( + request: GetRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: GetRawLogsResponse) => void + ): ClientUnaryCall; + update( + request: UpdateRawLogsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const RawLogsServiceClient = makeGenericClientConstructor( + RawLogsServiceService, + "yandex.cloud.cdn.v1.RawLogsService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): RawLogsServiceClient; + service: typeof RawLogsServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/cdn/v1/resource.ts b/src/generated/yandex/cloud/cdn/v1/resource.ts index 18f38f22..f826fced 100644 --- a/src/generated/yandex/cloud/cdn/v1/resource.ts +++ b/src/generated/yandex/cloud/cdn/v1/resource.ts @@ -122,7 +122,7 @@ export function rewriteFlagToJSON(object: RewriteFlag): string { } } -/** A certificate type patameters. */ +/** A certificate type parameters. */ export enum SSLCertificateType { /** SSL_CERTIFICATE_TYPE_UNSPECIFIED - SSL certificate is unspecified. */ SSL_CERTIFICATE_TYPE_UNSPECIFIED = 0, @@ -171,7 +171,7 @@ export function sSLCertificateTypeToJSON(object: SSLCertificateType): string { } } -/** A certificate status patameters. */ +/** A certificate status parameters. */ export enum SSLCertificateStatus { /** SSL_CERTIFICATE_STATUS_UNSPECIFIED - SSL certificate is unspecified. 
*/ SSL_CERTIFICATE_STATUS_UNSPECIFIED = 0, @@ -557,19 +557,19 @@ export interface ResourceOptions_RewriteOption { flag: RewriteFlag; } -/** A set of the personal SSL certificate patameters. */ +/** A set of the personal SSL certificate parameters. */ export interface SSLTargetCertificate { $type: "yandex.cloud.cdn.v1.SSLTargetCertificate"; - /** Type of the sertificate. */ + /** Type of the certificate. */ type: SSLCertificateType; /** Certificate data. */ data?: SSLCertificateData; } -/** A SSL sertificate patameters. */ +/** A SSL certificate parameters. */ export interface SSLCertificate { $type: "yandex.cloud.cdn.v1.SSLCertificate"; - /** Type of the sertificate. */ + /** Type of the certificate. */ type: SSLCertificateType; /** Active status. */ status: SSLCertificateStatus; @@ -577,7 +577,7 @@ export interface SSLCertificate { data?: SSLCertificateData; } -/** A certificate data patameters. */ +/** A certificate data parameters. */ export interface SSLCertificateData { $type: "yandex.cloud.cdn.v1.SSLCertificateData"; /** @@ -587,10 +587,10 @@ export interface SSLCertificateData { cm?: SSLCertificateCMData | undefined; } -/** A certificate data custom patameters. */ +/** A certificate data custom parameters. */ export interface SSLCertificateCMData { $type: "yandex.cloud.cdn.v1.SSLCertificateCMData"; - /** ID of the custom sertificate. */ + /** ID of the custom certificate. */ id: string; } diff --git a/src/generated/yandex/cloud/cdn/v1/resource_service.ts b/src/generated/yandex/cloud/cdn/v1/resource_service.ts index e5a50a87..b3d961af 100644 --- a/src/generated/yandex/cloud/cdn/v1/resource_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/resource_service.ts @@ -102,14 +102,14 @@ export interface CreateResourceRequest_Origin { * returned in result. */ originSource: string | undefined; - /** Set up resourse origin parameters. */ + /** Set up resource origin parameters. */ originSourceParams?: ResourceOriginParams | undefined; } -/** A set of resourse origin parameters. */ +/** A set of resource origin parameters. */ export interface ResourceOriginParams { $type: "yandex.cloud.cdn.v1.ResourceOriginParams"; - /** Sourse of the content. */ + /** Source of the content. */ source: string; /** Set up type of the origin. */ meta?: OriginMeta; @@ -1438,7 +1438,11 @@ export const ResourceServiceService = { Buffer.from(ListResourcesResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListResourcesResponse.decode(value), }, - /** Creates client's CDN resource. */ + /** + * Creates a CDN resource in the specified folder. + * + * Creation may take up to 15 minutes. + */ create: { path: "/yandex.cloud.cdn.v1.ResourceService/Create", requestStream: false, @@ -1450,7 +1454,14 @@ export const ResourceServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates of client's CDN resource. (PATCH behavior) */ + /** + * Updates the specified CDN resource. + * + * The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a + * [CacheService.Purge] request. 
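+ *
+ * Sketch of the recommended follow-up (client names and the placeholder ID are assumptions; only the
+ * fields present in the request are changed, per the patch behaviour described above):
+ *   resourceClient.update(updateResourceRequest, (err, operation) => {
+ *     // once the operation completes, purge so edge caches pick up the new configuration
+ *     cacheClient.purge(PurgeCacheRequest.fromPartial({ resourceId: "<CDN resource ID>" }), () => {});
+ *   });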
+ */ update: { path: "/yandex.cloud.cdn.v1.ResourceService/Update", requestStream: false, @@ -1475,7 +1486,7 @@ export const ResourceServiceService = { responseDeserialize: (value: Buffer) => Operation.decode(value), }, /** - * Get Provider's CNAME (edge endpoint) binded to specified folder id. + * Get Provider's CNAME (edge endpoint) bind to specified folder id. * Returns UNIMPLEMENTED error, if provider doesn't support CNAME request. */ getProviderCName: { @@ -1498,14 +1509,25 @@ export interface ResourceServiceServer extends UntypedServiceImplementation { get: handleUnaryCall; /** Lists CDN resources. */ list: handleUnaryCall; - /** Creates client's CDN resource. */ + /** + * Creates a CDN resource in the specified folder. + * + * Creation may take up to 15 minutes. + */ create: handleUnaryCall; - /** Updates of client's CDN resource. (PATCH behavior) */ + /** + * Updates the specified CDN resource. + * + * The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a + * [CacheService.Purge] request. + */ update: handleUnaryCall; /** Deletes client's CDN resource. */ delete: handleUnaryCall; /** - * Get Provider's CNAME (edge endpoint) binded to specified folder id. + * Get Provider's CNAME (edge endpoint) bind to specified folder id. * Returns UNIMPLEMENTED error, if provider doesn't support CNAME request. */ getProviderCName: handleUnaryCall< @@ -1556,7 +1578,11 @@ export interface ResourceServiceClient extends Client { response: ListResourcesResponse ) => void ): ClientUnaryCall; - /** Creates client's CDN resource. */ + /** + * Creates a CDN resource in the specified folder. + * + * Creation may take up to 15 minutes. + */ create( request: CreateResourceRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1572,7 +1598,14 @@ export interface ResourceServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates of client's CDN resource. (PATCH behavior) */ + /** + * Updates the specified CDN resource. + * + * The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a + * [CacheService.Purge] request. + */ update( request: UpdateResourceRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1605,7 +1638,7 @@ export interface ResourceServiceClient extends Client { callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; /** - * Get Provider's CNAME (edge endpoint) binded to specified folder id. + * Get Provider's CNAME (edge endpoint) bind to specified folder id. * Returns UNIMPLEMENTED error, if provider doesn't support CNAME request. */ getProviderCName( diff --git a/src/generated/yandex/cloud/compute/v1/disk.ts b/src/generated/yandex/cloud/compute/v1/disk.ts index 9e58fa87..0905a82e 100644 --- a/src/generated/yandex/cloud/compute/v1/disk.ts +++ b/src/generated/yandex/cloud/compute/v1/disk.ts @@ -32,7 +32,7 @@ export interface Disk { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. 
IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/compute/v1/disk_service.ts b/src/generated/yandex/cloud/compute/v1/disk_service.ts index e9a6acf8..7f813b4f 100644 --- a/src/generated/yandex/cloud/compute/v1/disk_service.ts +++ b/src/generated/yandex/cloud/compute/v1/disk_service.ts @@ -209,6 +209,32 @@ export interface ListDiskOperationsResponse { nextPageToken: string; } +export interface MoveDiskRequest { + $type: "yandex.cloud.compute.v1.MoveDiskRequest"; + /** + * ID of the disk to move. + * + * To get the disk ID, make a [DiskService.List] request. + */ + diskId: string; + /** + * ID of the folder to move the disk to. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + destinationFolderId: string; +} + +export interface MoveDiskMetadata { + $type: "yandex.cloud.compute.v1.MoveDiskMetadata"; + /** ID of the disk that is being moved. */ + diskId: string; + /** ID of the folder that the disk is being moved from. */ + sourceFolderId: string; + /** ID of the folder that the disk is being moved to. */ + destinationFolderId: string; +} + const baseGetDiskRequest: object = { $type: "yandex.cloud.compute.v1.GetDiskRequest", diskId: "", @@ -1481,6 +1507,174 @@ messageTypeRegistry.set( ListDiskOperationsResponse ); +const baseMoveDiskRequest: object = { + $type: "yandex.cloud.compute.v1.MoveDiskRequest", + diskId: "", + destinationFolderId: "", +}; + +export const MoveDiskRequest = { + $type: "yandex.cloud.compute.v1.MoveDiskRequest" as const, + + encode( + message: MoveDiskRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveDiskRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveDiskRequest } as MoveDiskRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveDiskRequest { + const message = { ...baseMoveDiskRequest } as MoveDiskRequest; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? 
String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveDiskRequest): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveDiskRequest { + const message = { ...baseMoveDiskRequest } as MoveDiskRequest; + message.diskId = object.diskId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveDiskRequest.$type, MoveDiskRequest); + +const baseMoveDiskMetadata: object = { + $type: "yandex.cloud.compute.v1.MoveDiskMetadata", + diskId: "", + sourceFolderId: "", + destinationFolderId: "", +}; + +export const MoveDiskMetadata = { + $type: "yandex.cloud.compute.v1.MoveDiskMetadata" as const, + + encode( + message: MoveDiskMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.sourceFolderId !== "") { + writer.uint32(18).string(message.sourceFolderId); + } + if (message.destinationFolderId !== "") { + writer.uint32(26).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveDiskMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveDiskMetadata } as MoveDiskMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.sourceFolderId = reader.string(); + break; + case 3: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveDiskMetadata { + const message = { ...baseMoveDiskMetadata } as MoveDiskMetadata; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.sourceFolderId = + object.sourceFolderId !== undefined && object.sourceFolderId !== null + ? String(object.sourceFolderId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveDiskMetadata): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.sourceFolderId !== undefined && + (obj.sourceFolderId = message.sourceFolderId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveDiskMetadata { + const message = { ...baseMoveDiskMetadata } as MoveDiskMetadata; + message.diskId = object.diskId ?? ""; + message.sourceFolderId = object.sourceFolderId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveDiskMetadata.$type, MoveDiskMetadata); + /** A set of methods for managing Disk resources. */ export const DiskServiceService = { /** @@ -1573,6 +1767,18 @@ export const DiskServiceService = { responseDeserialize: (value: Buffer) => ListDiskOperationsResponse.decode(value), }, + /** Moves the specified disk to another folder of the same cloud. 
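+ *
+ * Illustrative call (placeholder IDs; the client variable is an assumption, the request helper is the
+ * MoveDiskRequest.fromPartial defined above):
+ *   diskClient.move(
+ *     MoveDiskRequest.fromPartial({ diskId: "<disk ID>", destinationFolderId: "<target folder ID>" }),
+ *     (err, operation) => { if (err) throw err; }
+ *   );
+ * The returned [Operation] can be polled; its metadata is expected to be a [MoveDiskMetadata] message.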
*/ + move: { + path: "/yandex.cloud.compute.v1.DiskService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveDiskRequest) => + Buffer.from(MoveDiskRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveDiskRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface DiskServiceServer extends UntypedServiceImplementation { @@ -1607,6 +1813,8 @@ export interface DiskServiceServer extends UntypedServiceImplementation { ListDiskOperationsRequest, ListDiskOperationsResponse >; + /** Moves the specified disk to another folder of the same cloud. */ + move: handleUnaryCall; } export interface DiskServiceClient extends Client { @@ -1731,6 +1939,22 @@ export interface DiskServiceClient extends Client { response: ListDiskOperationsResponse ) => void ): ClientUnaryCall; + /** Moves the specified disk to another folder of the same cloud. */ + move( + request: MoveDiskRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveDiskRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveDiskRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const DiskServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/compute/v1/filesystem_service.ts b/src/generated/yandex/cloud/compute/v1/filesystem_service.ts index 361acea6..e074f64c 100644 --- a/src/generated/yandex/cloud/compute/v1/filesystem_service.ts +++ b/src/generated/yandex/cloud/compute/v1/filesystem_service.ts @@ -163,6 +163,8 @@ export interface UpdateFilesystemRequest { * 3. Send the new set in this field. */ labels: { [key: string]: string }; + /** Size of the filesystem, specified in bytes. */ + size: number; } export interface UpdateFilesystemRequest_LabelsEntry { @@ -846,6 +848,7 @@ const baseUpdateFilesystemRequest: object = { filesystemId: "", name: "", description: "", + size: 0, }; export const UpdateFilesystemRequest = { @@ -877,6 +880,9 @@ export const UpdateFilesystemRequest = { writer.uint32(42).fork() ).ldelim(); }); + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } return writer; }, @@ -914,6 +920,9 @@ export const UpdateFilesystemRequest = { message.labels[entry5.key] = entry5.value; } break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -948,6 +957,10 @@ export const UpdateFilesystemRequest = { acc[key] = String(value); return acc; }, {}); + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; return message; }, @@ -968,6 +981,7 @@ export const UpdateFilesystemRequest = { obj.labels[k] = v; }); } + message.size !== undefined && (obj.size = Math.round(message.size)); return obj; }, @@ -992,6 +1006,7 @@ export const UpdateFilesystemRequest = { } return acc; }, {}); + message.size = object.size ?? 
0; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/image.ts b/src/generated/yandex/cloud/compute/v1/image.ts index 8cc3e817..a5ad7257 100644 --- a/src/generated/yandex/cloud/compute/v1/image.ts +++ b/src/generated/yandex/cloud/compute/v1/image.ts @@ -36,7 +36,7 @@ export interface Image { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/compute/v1/image_service.ts b/src/generated/yandex/cloud/compute/v1/image_service.ts index e66f685b..9e4c1ec1 100644 --- a/src/generated/yandex/cloud/compute/v1/image_service.ts +++ b/src/generated/yandex/cloud/compute/v1/image_service.ts @@ -112,7 +112,7 @@ export interface CreateImageRequest { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/compute/v1/instance.ts b/src/generated/yandex/cloud/compute/v1/instance.ts index ec2f772e..65988ab7 100644 --- a/src/generated/yandex/cloud/compute/v1/instance.ts +++ b/src/generated/yandex/cloud/compute/v1/instance.ts @@ -79,6 +79,8 @@ export interface Instance { bootDisk?: AttachedDisk; /** Array of secondary disks that are attached to the instance. */ secondaryDisks: AttachedDisk[]; + /** Array of local disks that are attached to the instance. */ + localDisks: AttachedLocalDisk[]; /** Array of filesystems that are attached to the instance. */ filesystems: AttachedFilesystem[]; /** Array of network interfaces that are attached to the instance. */ @@ -282,6 +284,19 @@ export function attachedDisk_ModeToJSON(object: AttachedDisk_Mode): string { } } +export interface AttachedLocalDisk { + $type: "yandex.cloud.compute.v1.AttachedLocalDisk"; + /** Size of the disk, specified in bytes. */ + size: number; + /** + * Serial number that is reflected into the /dev/disk/by-id/ tree + * of a Linux operating system running within the instance. + * + * This value can be used to reference the device for mounting, resizing, and so on, from within the instance. + */ + deviceName: string; +} + export interface AttachedFilesystem { $type: "yandex.cloud.compute.v1.AttachedFilesystem"; /** Access mode to the filesystem. 
*/ @@ -595,6 +610,9 @@ export const Instance = { for (const v of message.secondaryDisks) { AttachedDisk.encode(v!, writer.uint32(106).fork()).ldelim(); } + for (const v of message.localDisks) { + AttachedLocalDisk.encode(v!, writer.uint32(178).fork()).ldelim(); + } for (const v of message.filesystems) { AttachedFilesystem.encode(v!, writer.uint32(170).fork()).ldelim(); } @@ -635,6 +653,7 @@ export const Instance = { message.labels = {}; message.metadata = {}; message.secondaryDisks = []; + message.localDisks = []; message.filesystems = []; message.networkInterfaces = []; while (reader.pos < end) { @@ -692,6 +711,11 @@ export const Instance = { AttachedDisk.decode(reader, reader.uint32()) ); break; + case 22: + message.localDisks.push( + AttachedLocalDisk.decode(reader, reader.uint32()) + ); + break; case 21: message.filesystems.push( AttachedFilesystem.decode(reader, reader.uint32()) @@ -789,6 +813,9 @@ export const Instance = { message.secondaryDisks = (object.secondaryDisks ?? []).map((e: any) => AttachedDisk.fromJSON(e) ); + message.localDisks = (object.localDisks ?? []).map((e: any) => + AttachedLocalDisk.fromJSON(e) + ); message.filesystems = (object.filesystems ?? []).map((e: any) => AttachedFilesystem.fromJSON(e) ); @@ -858,6 +885,13 @@ export const Instance = { } else { obj.secondaryDisks = []; } + if (message.localDisks) { + obj.localDisks = message.localDisks.map((e) => + e ? AttachedLocalDisk.toJSON(e) : undefined + ); + } else { + obj.localDisks = []; + } if (message.filesystems) { obj.filesystems = message.filesystems.map((e) => e ? AttachedFilesystem.toJSON(e) : undefined @@ -926,6 +960,8 @@ export const Instance = { : undefined; message.secondaryDisks = object.secondaryDisks?.map((e) => AttachedDisk.fromPartial(e)) || []; + message.localDisks = + object.localDisks?.map((e) => AttachedLocalDisk.fromPartial(e)) || []; message.filesystems = object.filesystems?.map((e) => AttachedFilesystem.fromPartial(e)) || []; message.networkInterfaces = @@ -1307,6 +1343,81 @@ export const AttachedDisk = { messageTypeRegistry.set(AttachedDisk.$type, AttachedDisk); +const baseAttachedLocalDisk: object = { + $type: "yandex.cloud.compute.v1.AttachedLocalDisk", + size: 0, + deviceName: "", +}; + +export const AttachedLocalDisk = { + $type: "yandex.cloud.compute.v1.AttachedLocalDisk" as const, + + encode( + message: AttachedLocalDisk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.size !== 0) { + writer.uint32(8).int64(message.size); + } + if (message.deviceName !== "") { + writer.uint32(18).string(message.deviceName); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AttachedLocalDisk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAttachedLocalDisk } as AttachedLocalDisk; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = longToNumber(reader.int64() as Long); + break; + case 2: + message.deviceName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AttachedLocalDisk { + const message = { ...baseAttachedLocalDisk } as AttachedLocalDisk; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.deviceName = + object.deviceName !== undefined && object.deviceName !== null + ? 
String(object.deviceName) + : ""; + return message; + }, + + toJSON(message: AttachedLocalDisk): unknown { + const obj: any = {}; + message.size !== undefined && (obj.size = Math.round(message.size)); + message.deviceName !== undefined && (obj.deviceName = message.deviceName); + return obj; + }, + + fromPartial, I>>( + object: I + ): AttachedLocalDisk { + const message = { ...baseAttachedLocalDisk } as AttachedLocalDisk; + message.size = object.size ?? 0; + message.deviceName = object.deviceName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AttachedLocalDisk.$type, AttachedLocalDisk); + const baseAttachedFilesystem: object = { $type: "yandex.cloud.compute.v1.AttachedFilesystem", mode: 0, diff --git a/src/generated/yandex/cloud/compute/v1/instance_service.ts b/src/generated/yandex/cloud/compute/v1/instance_service.ts index 7322f303..46c7daa0 100644 --- a/src/generated/yandex/cloud/compute/v1/instance_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instance_service.ts @@ -166,6 +166,8 @@ export interface CreateInstanceRequest { bootDiskSpec?: AttachedDiskSpec; /** Array of secondary disks to attach to the instance. */ secondaryDiskSpecs: AttachedDiskSpec[]; + /** Array of local disks to attach to the instance. */ + localDiskSpecs: AttachedLocalDiskSpec[]; /** * Array of filesystems to attach to the instance. * @@ -275,6 +277,8 @@ export interface UpdateInstanceRequest { networkSettings?: NetworkSettings; /** Placement policy configuration. */ placementPolicy?: PlacementPolicy; + /** Scheduling policy configuration. */ + schedulingPolicy?: SchedulingPolicy; } export interface UpdateInstanceRequest_LabelsEntry { @@ -678,6 +682,12 @@ export interface AttachedDiskSpec_DiskSpec { snapshotId: string | undefined; } +export interface AttachedLocalDiskSpec { + $type: "yandex.cloud.compute.v1.AttachedLocalDiskSpec"; + /** Size of the disk, specified in bytes. */ + size: number; +} + export interface AttachedFilesystemSpec { $type: "yandex.cloud.compute.v1.AttachedFilesystemSpec"; /** Mode of access to the filesystem that should be attached. */ @@ -789,6 +799,32 @@ export interface DnsRecordSpec { ptr: boolean; } +export interface MoveInstanceRequest { + $type: "yandex.cloud.compute.v1.MoveInstanceRequest"; + /** + * ID of the instance to move. + * + * To get the instance ID, make a [InstanceService.List] request. + */ + instanceId: string; + /** + * ID of the folder to move the instance to. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + destinationFolderId: string; +} + +export interface MoveInstanceMetadata { + $type: "yandex.cloud.compute.v1.MoveInstanceMetadata"; + /** ID of the instance that is being moved. */ + instanceId: string; + /** ID of the folder that the instance is being moved from. */ + sourceFolderId: string; + /** ID of the folder that the instance is being moved to. 
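Illustrative usage sketch: populating the new localDiskSpecs field when building a CreateInstanceRequest. fromPartial accepts a deep-partial object, so only the fields relevant to local disks are shown; the folder and zone IDs, the disk size, and the import path are placeholders and assumptions.

// Illustrative only; import path follows the generated file layout above.
import {
  CreateInstanceRequest,
  AttachedLocalDiskSpec,
} from './generated/yandex/cloud/compute/v1/instance_service';

// Request an instance with two local disks of 100 GiB each (sizes are in bytes).
const createRequest = CreateInstanceRequest.fromPartial({
  folderId: 'b1g-example-folder',   // placeholder folder ID
  zoneId: 'ru-central1-a',          // placeholder zone
  localDiskSpecs: [
    AttachedLocalDiskSpec.fromPartial({ size: 100 * 1024 ** 3 }),
    AttachedLocalDiskSpec.fromPartial({ size: 100 * 1024 ** 3 }),
  ],
});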
*/ + destinationFolderId: string; +} + const baseGetInstanceRequest: object = { $type: "yandex.cloud.compute.v1.GetInstanceRequest", instanceId: "", @@ -1122,6 +1158,9 @@ export const CreateInstanceRequest = { for (const v of message.secondaryDiskSpecs) { AttachedDiskSpec.encode(v!, writer.uint32(82).fork()).ldelim(); } + for (const v of message.localDiskSpecs) { + AttachedLocalDiskSpec.encode(v!, writer.uint32(146).fork()).ldelim(); + } for (const v of message.filesystemSpecs) { AttachedFilesystemSpec.encode(v!, writer.uint32(138).fork()).ldelim(); } @@ -1165,6 +1204,7 @@ export const CreateInstanceRequest = { message.labels = {}; message.metadata = {}; message.secondaryDiskSpecs = []; + message.localDiskSpecs = []; message.filesystemSpecs = []; message.networkInterfaceSpecs = []; while (reader.pos < end) { @@ -1217,6 +1257,11 @@ export const CreateInstanceRequest = { AttachedDiskSpec.decode(reader, reader.uint32()) ); break; + case 18: + message.localDiskSpecs.push( + AttachedLocalDiskSpec.decode(reader, reader.uint32()) + ); + break; case 17: message.filesystemSpecs.push( AttachedFilesystemSpec.decode(reader, reader.uint32()) @@ -1304,6 +1349,9 @@ export const CreateInstanceRequest = { message.secondaryDiskSpecs = (object.secondaryDiskSpecs ?? []).map( (e: any) => AttachedDiskSpec.fromJSON(e) ); + message.localDiskSpecs = (object.localDiskSpecs ?? []).map((e: any) => + AttachedLocalDiskSpec.fromJSON(e) + ); message.filesystemSpecs = (object.filesystemSpecs ?? []).map((e: any) => AttachedFilesystemSpec.fromJSON(e) ); @@ -1368,6 +1416,13 @@ export const CreateInstanceRequest = { } else { obj.secondaryDiskSpecs = []; } + if (message.localDiskSpecs) { + obj.localDiskSpecs = message.localDiskSpecs.map((e) => + e ? AttachedLocalDiskSpec.toJSON(e) : undefined + ); + } else { + obj.localDiskSpecs = []; + } if (message.filesystemSpecs) { obj.filesystemSpecs = message.filesystemSpecs.map((e) => e ? AttachedFilesystemSpec.toJSON(e) : undefined @@ -1436,6 +1491,9 @@ export const CreateInstanceRequest = { message.secondaryDiskSpecs = object.secondaryDiskSpecs?.map((e) => AttachedDiskSpec.fromPartial(e)) || []; + message.localDiskSpecs = + object.localDiskSpecs?.map((e) => AttachedLocalDiskSpec.fromPartial(e)) || + []; message.filesystemSpecs = object.filesystemSpecs?.map((e) => AttachedFilesystemSpec.fromPartial(e) @@ -1771,6 +1829,12 @@ export const UpdateInstanceRequest = { writer.uint32(90).fork() ).ldelim(); } + if (message.schedulingPolicy !== undefined) { + SchedulingPolicy.encode( + message.schedulingPolicy, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -1837,6 +1901,12 @@ export const UpdateInstanceRequest = { reader.uint32() ); break; + case 12: + message.schedulingPolicy = SchedulingPolicy.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1895,6 +1965,10 @@ export const UpdateInstanceRequest = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromJSON(object.placementPolicy) : undefined; + message.schedulingPolicy = + object.schedulingPolicy !== undefined && object.schedulingPolicy !== null + ? SchedulingPolicy.fromJSON(object.schedulingPolicy) + : undefined; return message; }, @@ -1935,6 +2009,10 @@ export const UpdateInstanceRequest = { (obj.placementPolicy = message.placementPolicy ? PlacementPolicy.toJSON(message.placementPolicy) : undefined); + message.schedulingPolicy !== undefined && + (obj.schedulingPolicy = message.schedulingPolicy + ? 
SchedulingPolicy.toJSON(message.schedulingPolicy) + : undefined); return obj; }, @@ -1979,6 +2057,10 @@ export const UpdateInstanceRequest = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromPartial(object.placementPolicy) : undefined; + message.schedulingPolicy = + object.schedulingPolicy !== undefined && object.schedulingPolicy !== null + ? SchedulingPolicy.fromPartial(object.schedulingPolicy) + : undefined; return message; }, }; @@ -5197,6 +5279,71 @@ messageTypeRegistry.set( AttachedDiskSpec_DiskSpec ); +const baseAttachedLocalDiskSpec: object = { + $type: "yandex.cloud.compute.v1.AttachedLocalDiskSpec", + size: 0, +}; + +export const AttachedLocalDiskSpec = { + $type: "yandex.cloud.compute.v1.AttachedLocalDiskSpec" as const, + + encode( + message: AttachedLocalDiskSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.size !== 0) { + writer.uint32(8).int64(message.size); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AttachedLocalDiskSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAttachedLocalDiskSpec } as AttachedLocalDiskSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AttachedLocalDiskSpec { + const message = { ...baseAttachedLocalDiskSpec } as AttachedLocalDiskSpec; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + return message; + }, + + toJSON(message: AttachedLocalDiskSpec): unknown { + const obj: any = {}; + message.size !== undefined && (obj.size = Math.round(message.size)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AttachedLocalDiskSpec { + const message = { ...baseAttachedLocalDiskSpec } as AttachedLocalDiskSpec; + message.size = object.size ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(AttachedLocalDiskSpec.$type, AttachedLocalDiskSpec); + const baseAttachedFilesystemSpec: object = { $type: "yandex.cloud.compute.v1.AttachedFilesystemSpec", mode: 0, @@ -5727,6 +5874,177 @@ export const DnsRecordSpec = { messageTypeRegistry.set(DnsRecordSpec.$type, DnsRecordSpec); +const baseMoveInstanceRequest: object = { + $type: "yandex.cloud.compute.v1.MoveInstanceRequest", + instanceId: "", + destinationFolderId: "", +}; + +export const MoveInstanceRequest = { + $type: "yandex.cloud.compute.v1.MoveInstanceRequest" as const, + + encode( + message: MoveInstanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveInstanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMoveInstanceRequest } as MoveInstanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveInstanceRequest { + const message = { ...baseMoveInstanceRequest } as MoveInstanceRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveInstanceRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveInstanceRequest { + const message = { ...baseMoveInstanceRequest } as MoveInstanceRequest; + message.instanceId = object.instanceId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveInstanceRequest.$type, MoveInstanceRequest); + +const baseMoveInstanceMetadata: object = { + $type: "yandex.cloud.compute.v1.MoveInstanceMetadata", + instanceId: "", + sourceFolderId: "", + destinationFolderId: "", +}; + +export const MoveInstanceMetadata = { + $type: "yandex.cloud.compute.v1.MoveInstanceMetadata" as const, + + encode( + message: MoveInstanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.sourceFolderId !== "") { + writer.uint32(18).string(message.sourceFolderId); + } + if (message.destinationFolderId !== "") { + writer.uint32(26).string(message.destinationFolderId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): MoveInstanceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveInstanceMetadata } as MoveInstanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.sourceFolderId = reader.string(); + break; + case 3: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveInstanceMetadata { + const message = { ...baseMoveInstanceMetadata } as MoveInstanceMetadata; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.sourceFolderId = + object.sourceFolderId !== undefined && object.sourceFolderId !== null + ? String(object.sourceFolderId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? 
String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveInstanceMetadata): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.sourceFolderId !== undefined && + (obj.sourceFolderId = message.sourceFolderId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveInstanceMetadata { + const message = { ...baseMoveInstanceMetadata } as MoveInstanceMetadata; + message.instanceId = object.instanceId ?? ""; + message.sourceFolderId = object.sourceFolderId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveInstanceMetadata.$type, MoveInstanceMetadata); + /** A set of methods for managing Instance resources. */ export const InstanceServiceService = { /** @@ -5985,6 +6303,25 @@ export const InstanceServiceService = { responseDeserialize: (value: Buffer) => ListInstanceOperationsResponse.decode(value), }, + /** + * Moves the specified instance to another folder of the same cloud. + * + * The instance must be stopped before moving. To stop the instance, make a [Stop] request. + * + * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * that have been recorded to the source folder prior to moving will be retained. + */ + move: { + path: "/yandex.cloud.compute.v1.InstanceService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveInstanceRequest) => + Buffer.from(MoveInstanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveInstanceRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface InstanceServiceServer extends UntypedServiceImplementation { @@ -6064,6 +6401,15 @@ export interface InstanceServiceServer extends UntypedServiceImplementation { ListInstanceOperationsRequest, ListInstanceOperationsResponse >; + /** + * Moves the specified instance to another folder of the same cloud. + * + * The instance must be stopped before moving. To stop the instance, make a [Stop] request. + * + * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * that have been recorded to the source folder prior to moving will be retained. + */ + move: handleUnaryCall; } export interface InstanceServiceClient extends Client { @@ -6410,6 +6756,29 @@ export interface InstanceServiceClient extends Client { response: ListInstanceOperationsResponse ) => void ): ClientUnaryCall; + /** + * Moves the specified instance to another folder of the same cloud. + * + * The instance must be stopped before moving. To stop the instance, make a [Stop] request. + * + * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * that have been recorded to the source folder prior to moving will be retained. 
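Illustrative usage sketch: invoking the new Move RPC through the callback-style overloads declared just below. The endpoint address, the bearer-token metadata, and the IDs are assumptions for illustration; in an application you would normally obtain an authenticated client through the SDK's Session helper rather than constructing one directly.

// Low-level sketch against an assumed public Compute endpoint.
import { ChannelCredentials, Metadata } from '@grpc/grpc-js';
import {
  InstanceServiceClient,
  MoveInstanceRequest,
} from './generated/yandex/cloud/compute/v1/instance_service';

const client = new InstanceServiceClient(
  'compute.api.cloud.yandex.net:443',
  ChannelCredentials.createSsl()
);

// IAM token passed as call metadata (assumed auth scheme).
const metadata = new Metadata();
metadata.set('authorization', `Bearer ${process.env.YC_IAM_TOKEN ?? ''}`);

client.move(
  MoveInstanceRequest.fromPartial({
    instanceId: 'fhm-example-instance',      // placeholder; the instance must already be stopped
    destinationFolderId: 'b1g-target-folder' // placeholder destination folder
  }),
  metadata,
  (err, operation) => {
    if (err) throw err;
    console.log('Move operation started:', operation.id);
  }
);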
+ */ + move( + request: MoveInstanceRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveInstanceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveInstanceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const InstanceServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts index 9eddaf9b..78804e17 100644 --- a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts @@ -241,6 +241,11 @@ export interface UpdateInstanceGroupRequest { variables: Variable[]; /** Flag that inhibits deletion of the instance group */ deletionProtection: boolean; + /** + * Settings for balancing load between instances via [Application Load Balancer](/docs/application-load-balancer/concepts) + * (OSI model layer 7). + */ + applicationLoadBalancerSpec?: ApplicationLoadBalancerSpec; } export interface UpdateInstanceGroupRequest_LabelsEntry { @@ -1623,6 +1628,12 @@ export const UpdateInstanceGroupRequest = { if (message.deletionProtection === true) { writer.uint32(128).bool(message.deletionProtection); } + if (message.applicationLoadBalancerSpec !== undefined) { + ApplicationLoadBalancerSpec.encode( + message.applicationLoadBalancerSpec, + writer.uint32(138).fork() + ).ldelim(); + } return writer; }, @@ -1700,6 +1711,10 @@ export const UpdateInstanceGroupRequest = { case 16: message.deletionProtection = reader.bool(); break; + case 17: + message.applicationLoadBalancerSpec = + ApplicationLoadBalancerSpec.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1770,6 +1785,13 @@ export const UpdateInstanceGroupRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.applicationLoadBalancerSpec = + object.applicationLoadBalancerSpec !== undefined && + object.applicationLoadBalancerSpec !== null + ? ApplicationLoadBalancerSpec.fromJSON( + object.applicationLoadBalancerSpec + ) + : undefined; return message; }, @@ -1825,6 +1847,12 @@ export const UpdateInstanceGroupRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.applicationLoadBalancerSpec !== undefined && + (obj.applicationLoadBalancerSpec = message.applicationLoadBalancerSpec + ? ApplicationLoadBalancerSpec.toJSON( + message.applicationLoadBalancerSpec + ) + : undefined); return obj; }, @@ -1877,6 +1905,13 @@ export const UpdateInstanceGroupRequest = { message.variables = object.variables?.map((e) => Variable.fromPartial(e)) || []; message.deletionProtection = object.deletionProtection ?? false; + message.applicationLoadBalancerSpec = + object.applicationLoadBalancerSpec !== undefined && + object.applicationLoadBalancerSpec !== null + ? 
ApplicationLoadBalancerSpec.fromPartial( + object.applicationLoadBalancerSpec + ) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/snapshot.ts b/src/generated/yandex/cloud/compute/v1/snapshot.ts index 41ec4895..4c1fe67c 100644 --- a/src/generated/yandex/cloud/compute/v1/snapshot.ts +++ b/src/generated/yandex/cloud/compute/v1/snapshot.ts @@ -28,7 +28,7 @@ export interface Snapshot { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/containerregistry/v1/registry.ts b/src/generated/yandex/cloud/containerregistry/v1/registry.ts index d36724dc..9b6a8f87 100644 --- a/src/generated/yandex/cloud/containerregistry/v1/registry.ts +++ b/src/generated/yandex/cloud/containerregistry/v1/registry.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.containerregistry.v1"; -/** A Registry resource. For more information, see [Registry](/docs/cloud/containerregistry/registry). */ +/** A Registry resource. For more information, see the [Registry](/docs/container-registry/concepts/registry) section of the documentation. */ export interface Registry { $type: "yandex.cloud.containerregistry.v1.Registry"; /** Output only. ID of the registry. */ diff --git a/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts b/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts index 832a5c39..11b240c3 100644 --- a/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts +++ b/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts @@ -1710,9 +1710,9 @@ export const ReportReply = { messageTypeRegistry.set(ReportReply.$type, ReportReply); -/** Data Proc manager service defifnition */ +/** Data Proc manager service definition. */ export const DataprocManagerServiceService = { - /** Sends a status report from a host */ + /** Sends a status report from a host. */ report: { path: "/yandex.cloud.dataproc.manager.v1.DataprocManagerService/Report", requestStream: false, @@ -1728,12 +1728,12 @@ export const DataprocManagerServiceService = { export interface DataprocManagerServiceServer extends UntypedServiceImplementation { - /** Sends a status report from a host */ + /** Sends a status report from a host. */ report: handleUnaryCall; } export interface DataprocManagerServiceClient extends Client { - /** Sends a status report from a host */ + /** Sends a status report from a host. 
*/ report( request: ReportRequest, callback: (error: ServiceError | null, response: ReportReply) => void diff --git a/src/generated/yandex/cloud/dataproc/v1/cluster.ts b/src/generated/yandex/cloud/dataproc/v1/cluster.ts index aaf7dd28..c477b3da 100644 --- a/src/generated/yandex/cloud/dataproc/v1/cluster.ts +++ b/src/generated/yandex/cloud/dataproc/v1/cluster.ts @@ -159,6 +159,8 @@ export interface HadoopConfig { properties: { [key: string]: string }; /** List of public SSH keys to access to cluster hosts. */ sshPublicKeys: string[]; + /** Set of init-actions */ + initializationActions: InitializationAction[]; } export enum HadoopConfig_Service { @@ -286,6 +288,16 @@ export interface ClusterConfig { hadoop?: HadoopConfig; } +export interface InitializationAction { + $type: "yandex.cloud.dataproc.v1.InitializationAction"; + /** URI of the executable file */ + uri: string; + /** Arguments to the initialization action */ + args: string[]; + /** Execution timeout */ + timeout: number; +} + const baseCluster: object = { $type: "yandex.cloud.dataproc.v1.Cluster", id: "", @@ -811,6 +823,9 @@ export const HadoopConfig = { for (const v of message.sshPublicKeys) { writer.uint32(26).string(v!); } + for (const v of message.initializationActions) { + InitializationAction.encode(v!, writer.uint32(34).fork()).ldelim(); + } return writer; }, @@ -821,6 +836,7 @@ export const HadoopConfig = { message.services = []; message.properties = {}; message.sshPublicKeys = []; + message.initializationActions = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -846,6 +862,11 @@ export const HadoopConfig = { case 3: message.sshPublicKeys.push(reader.string()); break; + case 4: + message.initializationActions.push( + InitializationAction.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -868,6 +889,9 @@ export const HadoopConfig = { message.sshPublicKeys = (object.sshPublicKeys ?? []).map((e: any) => String(e) ); + message.initializationActions = (object.initializationActions ?? []).map( + (e: any) => InitializationAction.fromJSON(e) + ); return message; }, @@ -889,6 +913,13 @@ export const HadoopConfig = { } else { obj.sshPublicKeys = []; } + if (message.initializationActions) { + obj.initializationActions = message.initializationActions.map((e) => + e ? 
InitializationAction.toJSON(e) : undefined + ); + } else { + obj.initializationActions = []; + } return obj; }, @@ -906,6 +937,10 @@ export const HadoopConfig = { return acc; }, {}); message.sshPublicKeys = object.sshPublicKeys?.map((e) => e) || []; + message.initializationActions = + object.initializationActions?.map((e) => + InitializationAction.fromPartial(e) + ) || []; return message; }, }; @@ -1077,6 +1112,109 @@ export const ClusterConfig = { messageTypeRegistry.set(ClusterConfig.$type, ClusterConfig); +const baseInitializationAction: object = { + $type: "yandex.cloud.dataproc.v1.InitializationAction", + uri: "", + args: "", + timeout: 0, +}; + +export const InitializationAction = { + $type: "yandex.cloud.dataproc.v1.InitializationAction" as const, + + encode( + message: InitializationAction, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uri !== "") { + writer.uint32(10).string(message.uri); + } + for (const v of message.args) { + writer.uint32(18).string(v!); + } + if (message.timeout !== 0) { + writer.uint32(24).int64(message.timeout); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): InitializationAction { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseInitializationAction } as InitializationAction; + message.args = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uri = reader.string(); + break; + case 2: + message.args.push(reader.string()); + break; + case 3: + message.timeout = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): InitializationAction { + const message = { ...baseInitializationAction } as InitializationAction; + message.uri = + object.uri !== undefined && object.uri !== null ? String(object.uri) : ""; + message.args = (object.args ?? []).map((e: any) => String(e)); + message.timeout = + object.timeout !== undefined && object.timeout !== null + ? Number(object.timeout) + : 0; + return message; + }, + + toJSON(message: InitializationAction): unknown { + const obj: any = {}; + message.uri !== undefined && (obj.uri = message.uri); + if (message.args) { + obj.args = message.args.map((e) => e); + } else { + obj.args = []; + } + message.timeout !== undefined && + (obj.timeout = Math.round(message.timeout)); + return obj; + }, + + fromPartial, I>>( + object: I + ): InitializationAction { + const message = { ...baseInitializationAction } as InitializationAction; + message.uri = object.uri ?? ""; + message.args = object.args?.map((e) => e) || []; + message.timeout = object.timeout ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(InitializationAction.$type, InitializationAction); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -1126,6 +1264,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/dataproc/v1/common.ts b/src/generated/yandex/cloud/dataproc/v1/common.ts index 8048efa6..043839dc 100644 --- a/src/generated/yandex/cloud/dataproc/v1/common.ts +++ b/src/generated/yandex/cloud/dataproc/v1/common.ts @@ -6,13 +6,13 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.dataproc.v1"; export enum Health { - /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + /** HEALTH_UNKNOWN - Object is in unknown state (we have no data). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). */ + /** ALIVE - Object is alive and well (for example, all hosts of the cluster are alive). */ ALIVE = 1, - /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ + /** DEAD - Object is inoperable (it cannot perform any of its essential functions). */ DEAD = 2, - /** DEGRADED - Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). */ + /** DEGRADED - Object is partially alive (it can perform some of its essential functions). */ DEGRADED = 3, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/dataproc/v1/subcluster.ts b/src/generated/yandex/cloud/dataproc/v1/subcluster.ts index 9df3789c..d493c53a 100644 --- a/src/generated/yandex/cloud/dataproc/v1/subcluster.ts +++ b/src/generated/yandex/cloud/dataproc/v1/subcluster.ts @@ -148,12 +148,12 @@ export interface Host { /** * Name of the Data Proc host. The host name is assigned by Data Proc at creation time * and cannot be changed. The name is generated to be unique across all existing Data Proc - * hosts in Yandex.Cloud, as it defines the FQDN of the host. + * hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the Data Proc subcluster that the host belongs to. */ subclusterId: string; - /** Host status code. */ + /** Status code of the aggregated health of the host. */ health: Health; /** ID of the Compute virtual machine that is used as the Data Proc host. 
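Illustrative usage sketch for the InitializationAction message added to the Data Proc HadoopConfig above: attach a bootstrap script that runs on cluster hosts. The script URI, its arguments, and the assumption that timeout is expressed in seconds are all illustrative.

// Illustrative only; import path follows the generated file layout above.
import {
  HadoopConfig,
  InitializationAction,
} from './generated/yandex/cloud/dataproc/v1/cluster';

// Hadoop config with one init action; other HadoopConfig fields are omitted for brevity.
const hadoopConfig = HadoopConfig.fromPartial({
  sshPublicKeys: ['ssh-ed25519 AAAA... user@example'],
  initializationActions: [
    InitializationAction.fromPartial({
      uri: 's3a://my-bucket/bootstrap.sh',   // placeholder script location
      args: ['--install', 'extra-package'],
      timeout: 600,                          // assumed to be seconds
    }),
  ],
});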
*/ computeInstanceId: string; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts index 98251e8e..595c01ad 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts @@ -50,6 +50,50 @@ export function objectTransferStageToJSON(object: ObjectTransferStage): string { } } +export enum CleanupPolicy { + CLEANUP_POLICY_UNSPECIFIED = 0, + DISABLED = 1, + DROP = 2, + TRUNCATE = 3, + UNRECOGNIZED = -1, +} + +export function cleanupPolicyFromJSON(object: any): CleanupPolicy { + switch (object) { + case 0: + case "CLEANUP_POLICY_UNSPECIFIED": + return CleanupPolicy.CLEANUP_POLICY_UNSPECIFIED; + case 1: + case "DISABLED": + return CleanupPolicy.DISABLED; + case 2: + case "DROP": + return CleanupPolicy.DROP; + case 3: + case "TRUNCATE": + return CleanupPolicy.TRUNCATE; + case -1: + case "UNRECOGNIZED": + default: + return CleanupPolicy.UNRECOGNIZED; + } +} + +export function cleanupPolicyToJSON(object: CleanupPolicy): string { + switch (object) { + case CleanupPolicy.CLEANUP_POLICY_UNSPECIFIED: + return "CLEANUP_POLICY_UNSPECIFIED"; + case CleanupPolicy.DISABLED: + return "DISABLED"; + case CleanupPolicy.DROP: + return "DROP"; + case CleanupPolicy.TRUNCATE: + return "TRUNCATE"; + default: + return "UNKNOWN"; + } +} + export interface Secret { $type: "yandex.cloud.datatransfer.v1.endpoint.Secret"; /** Password */ diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts index 20720b44..3fa97f38 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts @@ -6,8 +6,11 @@ import { TLSMode, ObjectTransferStage, Secret, + CleanupPolicy, objectTransferStageFromJSON, objectTransferStageToJSON, + cleanupPolicyFromJSON, + cleanupPolicyToJSON, } from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; @@ -40,7 +43,7 @@ export interface MysqlConnection { /** * Managed cluster * - * Yandex.Cloud Managed MySQL cluster ID + * Yandex Managed Service for MySQL cluster ID */ mdbClusterId: string | undefined; /** @@ -166,6 +169,19 @@ export interface MysqlTarget { * IANA timezone database. Default: local timezone. */ timezone: string; + /** + * Cleanup policy + * + * Cleanup policy for activate, reactivate and reupload processes. Default is + * DISABLED. + */ + cleanupPolicy: CleanupPolicy; + /** + * Database schema for service table + * + * Default: db name. Here created technical tables (__tm_keeper, __tm_gtid_keeper). 
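Illustrative usage sketch: a MysqlTarget that drops previously transferred tables on (re)activation and keeps the technical tables (__tm_keeper, __tm_gtid_keeper) in a dedicated schema. Only the new fields are set, since fromPartial accepts a deep-partial object; import paths and the schema name are assumptions.

// Illustrative only; types come from the generated endpoint modules above.
import { MysqlTarget } from './generated/yandex/cloud/datatransfer/v1/endpoint/mysql';
import { CleanupPolicy } from './generated/yandex/cloud/datatransfer/v1/endpoint/common';

const mysqlTarget = MysqlTarget.fromPartial({
  cleanupPolicy: CleanupPolicy.DROP,
  serviceDatabase: 'data_transfer_service',   // placeholder schema for the technical tables
});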
+ */ + serviceDatabase: string; } const baseOnPremiseMysql: object = { @@ -657,6 +673,8 @@ const baseMysqlTarget: object = { sqlMode: "", skipConstraintChecks: false, timezone: "", + cleanupPolicy: 0, + serviceDatabase: "", }; export const MysqlTarget = { @@ -690,6 +708,12 @@ export const MysqlTarget = { if (message.timezone !== "") { writer.uint32(58).string(message.timezone); } + if (message.cleanupPolicy !== 0) { + writer.uint32(64).int32(message.cleanupPolicy); + } + if (message.serviceDatabase !== "") { + writer.uint32(122).string(message.serviceDatabase); + } return writer; }, @@ -721,6 +745,12 @@ export const MysqlTarget = { case 7: message.timezone = reader.string(); break; + case 8: + message.cleanupPolicy = reader.int32() as any; + break; + case 15: + message.serviceDatabase = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -760,6 +790,14 @@ export const MysqlTarget = { object.timezone !== undefined && object.timezone !== null ? String(object.timezone) : ""; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? cleanupPolicyFromJSON(object.cleanupPolicy) + : 0; + message.serviceDatabase = + object.serviceDatabase !== undefined && object.serviceDatabase !== null + ? String(object.serviceDatabase) + : ""; return message; }, @@ -779,6 +817,10 @@ export const MysqlTarget = { message.skipConstraintChecks !== undefined && (obj.skipConstraintChecks = message.skipConstraintChecks); message.timezone !== undefined && (obj.timezone = message.timezone); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = cleanupPolicyToJSON(message.cleanupPolicy)); + message.serviceDatabase !== undefined && + (obj.serviceDatabase = message.serviceDatabase); return obj; }, @@ -799,6 +841,8 @@ export const MysqlTarget = { message.sqlMode = object.sqlMode ?? ""; message.skipConstraintChecks = object.skipConstraintChecks ?? false; message.timezone = object.timezone ?? ""; + message.cleanupPolicy = object.cleanupPolicy ?? 0; + message.serviceDatabase = object.serviceDatabase ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts index 036f86c4..beec5fae 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts @@ -6,8 +6,11 @@ import { ObjectTransferStage, TLSMode, Secret, + CleanupPolicy, objectTransferStageFromJSON, objectTransferStageToJSON, + cleanupPolicyFromJSON, + cleanupPolicyToJSON, } from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; @@ -140,7 +143,7 @@ export interface PostgresConnection { /** * Managed cluster * - * Yandex.Cloud Managed PostgreSQL cluster ID + * Yandex Managed Service for PostgreSQL cluster ID */ mdbClusterId: string | undefined; /** @@ -176,15 +179,15 @@ export interface PostgresSource { /** * Included tables * - * If none or empty list is presented, all tables are replicated. Can contain - * regular expression. + * If none or empty list is presented, all tables are replicated. Full table name + * with schema. Can contain schema_name.* patterns. */ includeTables: string[]; /** * Excluded tables * - * If none or empty list is presented, all tables are replicated. Can contain - * regular expression. + * If none or empty list is presented, all tables are replicated. Full table name + * with schema. 
Can contain schema_name.* patterns. */ excludeTables: string[]; /** @@ -232,6 +235,13 @@ export interface PostgresTarget { * Password for database access. */ password?: Secret; + /** + * Cleanup policy + * + * Cleanup policy for activate, reactivate and reupload processes. Default is + * DISABLED. + */ + cleanupPolicy: CleanupPolicy; } const basePostgresObjectTransferSettings: object = { @@ -922,6 +932,7 @@ const basePostgresTarget: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresTarget", database: "", user: "", + cleanupPolicy: 0, }; export const PostgresTarget = { @@ -946,6 +957,9 @@ export const PostgresTarget = { if (message.password !== undefined) { Secret.encode(message.password, writer.uint32(34).fork()).ldelim(); } + if (message.cleanupPolicy !== 0) { + writer.uint32(40).int32(message.cleanupPolicy); + } return writer; }, @@ -971,6 +985,9 @@ export const PostgresTarget = { case 4: message.password = Secret.decode(reader, reader.uint32()); break; + case 5: + message.cleanupPolicy = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -997,6 +1014,10 @@ export const PostgresTarget = { object.password !== undefined && object.password !== null ? Secret.fromJSON(object.password) : undefined; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? cleanupPolicyFromJSON(object.cleanupPolicy) + : 0; return message; }, @@ -1012,6 +1033,8 @@ export const PostgresTarget = { (obj.password = message.password ? Secret.toJSON(message.password) : undefined); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = cleanupPolicyToJSON(message.cleanupPolicy)); return obj; }, @@ -1029,6 +1052,7 @@ export const PostgresTarget = { object.password !== undefined && object.password !== null ? Secret.fromPartial(object.password) : undefined; + message.cleanupPolicy = object.cleanupPolicy ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts index 6f0910f1..d358258b 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts @@ -37,7 +37,7 @@ export interface ListEndpointsRequest { * folder contains more endpoints than page_size, next_page_token will be included * in the response message. Include it into the subsequent ListEndpointRequest to * fetch the next page. Defaults to 100 if not specified. The maximum allowed value - * for this field is 100. + * for this field is 500. */ pageSize: number; /** diff --git a/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts b/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts index e5d9a0a1..902b07ea 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts @@ -96,7 +96,7 @@ export interface ListTransfersRequest { * folder contains more transfers than page_size, next_page_token will be included * in the response message. Include it into the subsequent ListTransfersRequest to * fetch the next page. Defaults to 100 if not specified. The maximum allowed value - * for this field is 100. + * for this field is 500. 
*/ pageSize: number; /** diff --git a/src/generated/yandex/cloud/iam/v1/iam_token_service.ts b/src/generated/yandex/cloud/iam/v1/iam_token_service.ts index 44afa18e..7d40e056 100644 --- a/src/generated/yandex/cloud/iam/v1/iam_token_service.ts +++ b/src/generated/yandex/cloud/iam/v1/iam_token_service.ts @@ -21,7 +21,7 @@ export const protobufPackage = "yandex.cloud.iam.v1"; export interface CreateIamTokenRequest { $type: "yandex.cloud.iam.v1.CreateIamTokenRequest"; /** - * OAuth token for a Yandex.Passport account. + * OAuth token for a Yandex account. * For more information, see [OAuth token](/docs/iam/concepts/authorization/oauth-token). */ yandexPassportOauthToken: string | undefined; diff --git a/src/generated/yandex/cloud/iam/v1/user_account.ts b/src/generated/yandex/cloud/iam/v1/user_account.ts index 80a46b9e..812d1d41 100644 --- a/src/generated/yandex/cloud/iam/v1/user_account.ts +++ b/src/generated/yandex/cloud/iam/v1/user_account.ts @@ -5,7 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.iam.v1"; -/** Currently represents only [Yandex.Passport account](/docs/iam/concepts/#passport). */ +/** Currently represents only [Yandex account](/docs/iam/concepts/#passport). */ export interface UserAccount { $type: "yandex.cloud.iam.v1.UserAccount"; /** ID of the user account. */ @@ -18,13 +18,13 @@ export interface UserAccount { /** * A YandexPassportUserAccount resource. - * For more information, see [Yandex.Passport account](/docs/iam/concepts/#passport). + * For more information, see [Yandex account](/docs/iam/concepts/#passport). */ export interface YandexPassportUserAccount { $type: "yandex.cloud.iam.v1.YandexPassportUserAccount"; - /** Login of the Yandex.Passport user account. */ + /** Login of the Yandex user account. */ login: string; - /** Default email of the Yandex.Passport user account. */ + /** Default email of the Yandex user account. */ defaultEmail: string; } diff --git a/src/generated/yandex/cloud/iam/v1/user_account_service.ts b/src/generated/yandex/cloud/iam/v1/user_account_service.ts index 7fabe0ff..61d63898 100644 --- a/src/generated/yandex/cloud/iam/v1/user_account_service.ts +++ b/src/generated/yandex/cloud/iam/v1/user_account_service.ts @@ -90,7 +90,7 @@ export const GetUserAccountRequest = { messageTypeRegistry.set(GetUserAccountRequest.$type, GetUserAccountRequest); -/** A set of methods for managing user accounts. Currently applicable only for [Yandex.Passport accounts](/docs/iam/concepts/#passport). */ +/** A set of methods for managing user accounts. Currently applicable only for [Yandex accounts](/docs/iam/concepts/#passport). */ export const UserAccountServiceService = { /** Returns the specified UserAccount resource. */ get: { diff --git a/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts b/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts index 37c11e34..b3bb2098 100644 --- a/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts +++ b/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts @@ -28,7 +28,7 @@ export interface WriteImageProductUsageRequest { validateOnly: boolean; /** Marketplace Product's ID. */ productId: string; - /** List of product usage records (up to 25 pet request). */ + /** List of product usage records (up to 25 per request). 
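Illustrative usage sketch: reporting Marketplace SaaS product usage with the record fields shown in the next hunk (uuid, skuId, quantity, timestamp). Product and SKU IDs are placeholders, the import paths are assumptions, and Node's crypto module supplies the record UUID.

// Dry-run write of a single usage record (validateOnly requests validation without writing).
import { randomUUID } from 'crypto';
import { WriteImageProductUsageRequest } from './generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service';
import { UsageRecord } from './generated/yandex/cloud/marketplace/v1/metering/usage_record';

const usageRequest = WriteImageProductUsageRequest.fromPartial({
  validateOnly: true,
  productId: 'marketplace-product-id',        // placeholder product ID
  usageRecords: [
    UsageRecord.fromPartial({
      uuid: randomUUID(),
      skuId: 'marketplace-sku-id',            // placeholder SKU ID
      quantity: 1024,                         // measured in sku.usage_unit units
      timestamp: new Date(),
    }),
  ],
});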
*/ usageRecords: UsageRecord[]; } diff --git a/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts b/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts index 47be5b54..d2b9582d 100644 --- a/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts +++ b/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts @@ -10,9 +10,9 @@ export interface UsageRecord { $type: "yandex.cloud.marketplace.v1.metering.UsageRecord"; /** Unique identifier of the usage record (UUID format). */ uuid: string; - /** Consumed Marketplace SaaS Sku ID, linked to `UsageRecord.product_id`. */ + /** Consumed Marketplace SKU ID, linked to `UsageRecord.product_id`. */ skuId: string; - /** Quantity of sku consumed, measured in `sku.usage_unit` units (e.g. bytes). */ + /** Quantity of SKU consumed, measured in `sku.usage_unit` units (e.g. bytes). */ quantity: number; /** Timestamp in UTC for which the usage is being reported. */ timestamp?: Date; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts index 722cb265..cbcd1c38 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts @@ -61,7 +61,6 @@ export interface Cluster { deletionProtection: boolean; } -/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -331,7 +330,7 @@ export interface Host { * Name of the ClickHouse host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the ClickHouse host. The ID is assigned by MDB at creation time. */ @@ -554,19 +553,23 @@ export interface Access { /** Allow to export data from the cluster to Yandex DataLens. */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex.Cloud management console. + * Allow SQL queries to the cluster databases from the Yandex Cloud management console. * * See [SQL queries in the management console](/docs/managed-clickhouse/operations/web-sql-query) for more details. */ webSql: boolean; /** - * Allow to import data from Yandex.Metrica and AppMetrica to the cluster. + * Allow to import data from Yandex Metrica and AppMetrica to the cluster. * - * See [Export data to Yandex.Cloud](https://appmetrica.yandex.com/docs/cloud/index.html) for more details. + * See [Export data to Yandex Cloud](https://appmetrica.yandex.com/docs/cloud/index.html) for more details. */ metrika: boolean; /** Allow access to cluster for Serverless. 
*/ serverless: boolean; + /** Allow access for DataTransfer */ + dataTransfer: boolean; + /** Allow access for YandexQuery */ + yandexQuery: boolean; } export interface CloudStorage { @@ -2217,6 +2220,8 @@ const baseAccess: object = { webSql: false, metrika: false, serverless: false, + dataTransfer: false, + yandexQuery: false, }; export const Access = { @@ -2238,6 +2243,12 @@ export const Access = { if (message.serverless === true) { writer.uint32(32).bool(message.serverless); } + if (message.dataTransfer === true) { + writer.uint32(40).bool(message.dataTransfer); + } + if (message.yandexQuery === true) { + writer.uint32(48).bool(message.yandexQuery); + } return writer; }, @@ -2260,6 +2271,12 @@ export const Access = { case 4: message.serverless = reader.bool(); break; + case 5: + message.dataTransfer = reader.bool(); + break; + case 6: + message.yandexQuery = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -2286,6 +2303,14 @@ export const Access = { object.serverless !== undefined && object.serverless !== null ? Boolean(object.serverless) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; + message.yandexQuery = + object.yandexQuery !== undefined && object.yandexQuery !== null + ? Boolean(object.yandexQuery) + : false; return message; }, @@ -2295,6 +2320,10 @@ export const Access = { message.webSql !== undefined && (obj.webSql = message.webSql); message.metrika !== undefined && (obj.metrika = message.metrika); message.serverless !== undefined && (obj.serverless = message.serverless); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); + message.yandexQuery !== undefined && + (obj.yandexQuery = message.yandexQuery); return obj; }, @@ -2304,6 +2333,8 @@ export const Access = { message.webSql = object.webSql ?? false; message.metrika = object.metrika ?? false; message.serverless = object.serverless ?? false; + message.dataTransfer = object.dataTransfer ?? false; + message.yandexQuery = object.yandexQuery ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts index d8f3bad3..aad41db8 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts @@ -699,6 +699,38 @@ export interface AddClusterHostsMetadata { hostNames: string[]; } +export interface UpdateHostSpec { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateHostSpec"; + /** + * Name of the host to update. + * To get the ClickHouse host name, use a [ClusterService.ListHosts] request. + */ + hostName: string; + /** Field mask that specifies which fields of the ClickHouse host should be updated. */ + updateMask?: FieldMask; + /** Whether the host should get a public IP address on creation. */ + assignPublicIp?: boolean; +} + +export interface UpdateClusterHostsRequest { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsRequest"; + /** + * ID of the ClickHouse cluster to update hosts in. + * To get the ClickHouse cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** New configurations to apply to hosts. */ + updateHostSpecs: UpdateHostSpec[]; +} + +export interface UpdateClusterHostsMetadata { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsMetadata"; + /** ID of the ClickHouse cluster to modify hosts in. 
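Illustrative usage sketch: the new UpdateHosts call takes per-host specs plus a field mask naming the fields to change. The cluster ID, the host FQDN, and the snake_case mask path are assumptions; FieldMask comes from the generated google/protobuf module.

// Assign a public IP to a single ClickHouse host (illustrative only).
import { FieldMask } from './generated/google/protobuf/field_mask';
import {
  UpdateClusterHostsRequest,
  UpdateHostSpec,
} from './generated/yandex/cloud/mdb/clickhouse/v1/cluster_service';

const updateHostsRequest = UpdateClusterHostsRequest.fromPartial({
  clusterId: 'c9q-example-cluster',                     // placeholder cluster ID
  updateHostSpecs: [
    UpdateHostSpec.fromPartial({
      hostName: 'rc1a-example.mdb.yandexcloud.net',     // placeholder host FQDN
      assignPublicIp: true,
      updateMask: FieldMask.fromPartial({ paths: ['assign_public_ip'] }),
    }),
  ],
});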
*/ + clusterId: string; + /** Names of hosts that are being modified. */ + hostNames: string[]; +} + export interface DeleteClusterHostsRequest { $type: "yandex.cloud.mdb.clickhouse.v1.DeleteClusterHostsRequest"; /** @@ -5114,6 +5146,289 @@ export const AddClusterHostsMetadata = { messageTypeRegistry.set(AddClusterHostsMetadata.$type, AddClusterHostsMetadata); +const baseUpdateHostSpec: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateHostSpec", + hostName: "", +}; + +export const UpdateHostSpec = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateHostSpec" as const, + + encode( + message: UpdateHostSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hostName !== "") { + writer.uint32(10).string(message.hostName); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.assignPublicIp !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.assignPublicIp! }, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateHostSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hostName = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.assignPublicIp = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = + object.hostName !== undefined && object.hostName !== null + ? String(object.hostName) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : undefined; + return message; + }, + + toJSON(message: UpdateHostSpec): unknown { + const obj: any = {}; + message.hostName !== undefined && (obj.hostName = message.hostName); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = object.hostName ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.assignPublicIp = object.assignPublicIp ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateHostSpec.$type, UpdateHostSpec); + +const baseUpdateClusterHostsRequest: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsRequest", + clusterId: "", +}; + +export const UpdateClusterHostsRequest = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsRequest" as const, + + encode( + message: UpdateClusterHostsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.updateHostSpecs) { + UpdateHostSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.updateHostSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.updateHostSpecs.push( + UpdateHostSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.updateHostSpecs = (object.updateHostSpecs ?? []).map((e: any) => + UpdateHostSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateClusterHostsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.updateHostSpecs) { + obj.updateHostSpecs = message.updateHostSpecs.map((e) => + e ? UpdateHostSpec.toJSON(e) : undefined + ); + } else { + obj.updateHostSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = object.clusterId ?? ""; + message.updateHostSpecs = + object.updateHostSpecs?.map((e) => UpdateHostSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsRequest.$type, + UpdateClusterHostsRequest +); + +const baseUpdateClusterHostsMetadata: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsMetadata", + clusterId: "", + hostNames: "", +}; + +export const UpdateClusterHostsMetadata = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsMetadata" as const, + + encode( + message: UpdateClusterHostsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.hostNames) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.hostNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.hostNames.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.hostNames = (object.hostNames ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: UpdateClusterHostsMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.hostNames) { + obj.hostNames = message.hostNames.map((e) => e); + } else { + obj.hostNames = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = object.clusterId ?? ""; + message.hostNames = object.hostNames?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsMetadata.$type, + UpdateClusterHostsMetadata +); + const baseDeleteClusterHostsRequest: object = { $type: "yandex.cloud.mdb.clickhouse.v1.DeleteClusterHostsRequest", clusterId: "", @@ -8352,6 +8667,19 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates the specified hosts. */ + updateHosts: { + path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateHosts", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterHostsRequest) => + Buffer.from(UpdateClusterHostsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateClusterHostsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified hosts for a cluster. */ deleteHosts: { path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteHosts", @@ -8583,6 +8911,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { listHosts: handleUnaryCall; /** Creates new hosts for a cluster. */ addHosts: handleUnaryCall; + /** Updates the specified hosts. */ + updateHosts: handleUnaryCall; /** Deletes the specified hosts for a cluster. */ deleteHosts: handleUnaryCall; /** Returns the specified shard. */ @@ -8958,6 +9288,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates the specified hosts. 
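Illustrative usage sketch: wrapping the callback-style updateHosts overload declared just below in a Promise, so the returned Operation can be awaited. How the ClusterServiceClient instance is obtained is left to the application (for example via the SDK's Session helper, an assumption here); the Operation import path is also an assumption.

// Promisified wrapper around ClusterServiceClient.updateHosts (illustrative only).
import { ServiceError } from '@grpc/grpc-js';
import {
  ClusterServiceClient,
  UpdateClusterHostsRequest,
} from './generated/yandex/cloud/mdb/clickhouse/v1/cluster_service';
import { Operation } from './generated/yandex/cloud/operation/operation';

function updateHosts(
  client: ClusterServiceClient,
  request: UpdateClusterHostsRequest
): Promise<Operation> {
  return new Promise((resolve, reject) =>
    client.updateHosts(request, (err: ServiceError | null, operation: Operation) =>
      err ? reject(err) : resolve(operation)
    )
  );
}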
*/ + updateHosts( + request: UpdateClusterHostsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified hosts for a cluster. */ + deleteHosts( + request: DeleteClusterHostsRequest, diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts new file mode 100644 index 00000000..a9cae4b8 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts @@ -0,0 +1,280 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface Backup { + $type: "yandex.cloud.mdb.elasticsearch.v1.Backup"; + /** Required. ID of the backup. */ + id: string; + /** ID of the folder that the backup belongs to. */ + folderId: string; + /** ID of the associated Elasticsearch cluster. */ + sourceClusterId: string; + /** The time when the backup operation was started. */ + startedAt?: Date; + /** The time when the backup was created (i.e. when the backup operation completed). */ + createdAt?: Date; + /** Names of the indices in the backup (max 100). */ + indices: string[]; + /** Elasticsearch version used to create the snapshot. */ + elasticsearchVersion: string; + /** Total size of all indices in the backup, in bytes. */ + sizeBytes: number; + /** Total count of indices in the backup. */ + indicesTotal: number; +} + +const baseBackup: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Backup", + id: "", + folderId: "", + sourceClusterId: "", + indices: "", + elasticsearchVersion: "", + sizeBytes: 0, + indicesTotal: 0, +}; + +export const Backup = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Backup" as const, + + encode( + message: Backup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.sourceClusterId !== "") { + writer.uint32(26).string(message.sourceClusterId); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(42).fork() + ).ldelim(); + } + for (const v of message.indices) { + writer.uint32(50).string(v!); + } + if (message.elasticsearchVersion !== "") { + writer.uint32(58).string(message.elasticsearchVersion); + } + if (message.sizeBytes !== 0) { + writer.uint32(64).int64(message.sizeBytes); + } + if (message.indicesTotal !== 0) { + writer.uint32(72).int64(message.indicesTotal); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Backup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ?
reader.len : reader.pos + length; + const message = { ...baseBackup } as Backup; + message.indices = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.sourceClusterId = reader.string(); + break; + case 4: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.indices.push(reader.string()); + break; + case 7: + message.elasticsearchVersion = reader.string(); + break; + case 8: + message.sizeBytes = longToNumber(reader.int64() as Long); + break; + case 9: + message.indicesTotal = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup { + const message = { ...baseBackup } as Backup; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.sourceClusterId = + object.sourceClusterId !== undefined && object.sourceClusterId !== null + ? String(object.sourceClusterId) + : ""; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.indices = (object.indices ?? []).map((e: any) => String(e)); + message.elasticsearchVersion = + object.elasticsearchVersion !== undefined && + object.elasticsearchVersion !== null + ? String(object.elasticsearchVersion) + : ""; + message.sizeBytes = + object.sizeBytes !== undefined && object.sizeBytes !== null + ? Number(object.sizeBytes) + : 0; + message.indicesTotal = + object.indicesTotal !== undefined && object.indicesTotal !== null + ? Number(object.indicesTotal) + : 0; + return message; + }, + + toJSON(message: Backup): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.sourceClusterId !== undefined && + (obj.sourceClusterId = message.sourceClusterId); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + if (message.indices) { + obj.indices = message.indices.map((e) => e); + } else { + obj.indices = []; + } + message.elasticsearchVersion !== undefined && + (obj.elasticsearchVersion = message.elasticsearchVersion); + message.sizeBytes !== undefined && + (obj.sizeBytes = Math.round(message.sizeBytes)); + message.indicesTotal !== undefined && + (obj.indicesTotal = Math.round(message.indicesTotal)); + return obj; + }, + + fromPartial, I>>(object: I): Backup { + const message = { ...baseBackup } as Backup; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.sourceClusterId = object.sourceClusterId ?? ""; + message.startedAt = object.startedAt ?? undefined; + message.createdAt = object.createdAt ?? undefined; + message.indices = object.indices?.map((e) => e) || []; + message.elasticsearchVersion = object.elasticsearchVersion ?? ""; + message.sizeBytes = object.sizeBytes ?? 
0; + message.indicesTotal = object.indicesTotal ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Backup.$type, Backup); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts new file mode 100644 index 00000000..f0f875da --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts @@ -0,0 +1,429 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Backup } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/backup"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface GetBackupRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetBackupRequest"; + /** Required. ID of the backup to return. */ + backupId: string; +} + +export interface ListBackupsRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsRequest"; + /** Required. ID of the folder to list backups in. */ + folderId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. 
Set `page_token` to the `next_page_token` returned by a previous ListBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListBackupsResponse { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for ListBackups requests, + * if the number of results is larger than `page_size` specified in the request. + * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups + * requests will have their own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetBackupRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetBackupRequest", + backupId: "", +}; + +export const GetBackupRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetBackupRequest" as const, + + encode( + message: GetBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBackupRequest } as GetBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: GetBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetBackupRequest.$type, GetBackupRequest); + +const baseListBackupsRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListBackupsRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsRequest" as const, + + encode( + message: ListBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsRequest.$type, ListBackupsRequest); + +const baseListBackupsResponse: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsResponse", + nextPageToken: "", +}; + +export const ListBackupsResponse = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsResponse" as const, + + encode( + message: ListBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? 
Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); + +export const BackupServiceService = { + /** Returns the specified backup of Elasticsearch cluster. */ + get: { + path: "/yandex.cloud.mdb.elasticsearch.v1.BackupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBackupRequest) => + Buffer.from(GetBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBackupRequest.decode(value), + responseSerialize: (value: Backup) => + Buffer.from(Backup.encode(value).finish()), + responseDeserialize: (value: Buffer) => Backup.decode(value), + }, + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + list: { + path: "/yandex.cloud.mdb.elasticsearch.v1.BackupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBackupsRequest) => + Buffer.from(ListBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBackupsRequest.decode(value), + responseSerialize: (value: ListBackupsResponse) => + Buffer.from(ListBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), + }, +} as const; + +export interface BackupServiceServer extends UntypedServiceImplementation { + /** Returns the specified backup of Elasticsearch cluster. */ + get: handleUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + list: handleUnaryCall; +} + +export interface BackupServiceClient extends Client { + /** Returns the specified backup of Elasticsearch cluster. */ + get( + request: GetBackupRequest, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. 
*/ + list( + request: ListBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; +} + +export const BackupServiceClient = makeGenericClientConstructor( + BackupServiceService, + "yandex.cloud.mdb.elasticsearch.v1.BackupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BackupServiceClient; + service: typeof BackupServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts index c205bc51..2c504528 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts @@ -31,8 +31,10 @@ import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/elasticsearch import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { UserSpec } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/user"; +import { ExtensionSpec } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/extension"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { ElasticsearchConfig7 } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch"; +import { Backup } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/backup"; export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; @@ -123,6 +125,8 @@ export interface CreateClusterRequest { deletionProtection: boolean; /** Window of maintenance operations. 
*/ maintenanceWindow?: MaintenanceWindow; + /** optional */ + extensionSpecs: ExtensionSpec[]; } export interface CreateClusterRequest_LabelsEntry { @@ -721,6 +725,97 @@ export interface RescheduleMaintenanceMetadata { delayedUntil?: Date; } +export interface RestoreClusterRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest"; + /** Required. ID of the backup to restore from. */ + backupId: string; + /** Name of the ElasticSearch cluster. The name must be unique within the folder. */ + name: string; + /** Description of the ElasticSearch cluster. */ + description: string; + /** + * Custom labels for the ElasticSearch cluster as `` key:value `` pairs. Maximum 64 per resource. + * For example, "project": "mvp" or "source": "dictionary". + */ + labels: { [key: string]: string }; + /** Deployment environment of the ElasticSearch cluster. */ + environment: Cluster_Environment; + /** Configuration and resources for hosts that should be created for the ElasticSearch cluster. */ + configSpec?: ConfigSpec; + /** Required. Configuration of ElasticSearch hosts. */ + hostSpecs: HostSpec[]; + /** ID of the network to create the cluster in. */ + networkId: string; + /** User security groups */ + securityGroupIds: string[]; + /** ID of the service account used for access to Yandex Object Storage. */ + serviceAccountId: string; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** ID of the folder to create the ElasticSearch cluster in. */ + folderId: string; + /** optional */ + extensionSpecs: ExtensionSpec[]; +} + +export interface RestoreClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface RestoreClusterMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterMetadata"; + /** Required. ID of the new ElasticSearch cluster. */ + clusterId: string; + /** Required. ID of the backup used for recovery. */ + backupId: string; +} + +export interface BackupClusterRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterRequest"; + /** Required. ID of the ElasticSearch cluster to back up. */ + clusterId: string; +} + +export interface BackupClusterMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterMetadata"; + /** ID of the ElasticSearch cluster. */ + clusterId: string; +} + +export interface ListClusterBackupsRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsRequest"; + /** Required. ID of the Elasticsearch cluster. */ + clusterId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListClusterBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListClusterBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListClusterBackupsResponse { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for ListClusterBackups requests, + * if the number of results is larger than `page_size` specified in the request. 
+ * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListClusterBackups request. Subsequent ListClusterBackups + * requests will have their own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + const baseGetClusterRequest: object = { $type: "yandex.cloud.mdb.elasticsearch.v1.GetClusterRequest", clusterId: "", @@ -1039,6 +1134,9 @@ export const CreateClusterRequest = { writer.uint32(114).fork() ).ldelim(); } + for (const v of message.extensionSpecs) { + ExtensionSpec.encode(v!, writer.uint32(122).fork()).ldelim(); + } return writer; }, @@ -1053,6 +1151,7 @@ export const CreateClusterRequest = { message.userSpecs = []; message.hostSpecs = []; message.securityGroupIds = []; + message.extensionSpecs = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1104,6 +1203,11 @@ export const CreateClusterRequest = { reader.uint32() ); break; + case 15: + message.extensionSpecs.push( + ExtensionSpec.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -1167,6 +1271,9 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromJSON(object.maintenanceWindow) : undefined; + message.extensionSpecs = (object.extensionSpecs ?? []).map((e: any) => + ExtensionSpec.fromJSON(e) + ); return message; }, @@ -1216,6 +1323,13 @@ export const CreateClusterRequest = { (obj.maintenanceWindow = message.maintenanceWindow ? MaintenanceWindow.toJSON(message.maintenanceWindow) : undefined); + if (message.extensionSpecs) { + obj.extensionSpecs = message.extensionSpecs.map((e) => + e ? ExtensionSpec.toJSON(e) : undefined + ); + } else { + obj.extensionSpecs = []; + } return obj; }, @@ -1252,6 +1366,8 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromPartial(object.maintenanceWindow) : undefined; + message.extensionSpecs = + object.extensionSpecs?.map((e) => ExtensionSpec.fromPartial(e)) || []; return message; }, }; @@ -4635,95 +4751,861 @@ messageTypeRegistry.set( RescheduleMaintenanceMetadata ); -/** A set of methods for managing Elasticsearch clusters. */ -export const ClusterServiceService = { - /** - * Returns the specified Elasticsearch cluster. - * - * To get the list of available Elasticsearch clusters, make a [List] request. - */ - get: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Get", - requestStream: false, - responseStream: false, - requestSerialize: (value: GetClusterRequest) => - Buffer.from(GetClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), - responseSerialize: (value: Cluster) => - Buffer.from(Cluster.encode(value).finish()), - responseDeserialize: (value: Buffer) => Cluster.decode(value), - }, - /** Retrieves the list of Elasticsearch clusters that belong to the specified folder. */ - list: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/List", - requestStream: false, - responseStream: false, - requestSerialize: (value: ListClustersRequest) => - Buffer.from(ListClustersRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), - responseSerialize: (value: ListClustersResponse) => - Buffer.from(ListClustersResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), - }, - /** Creates a new Elasticsearch cluster in the specified folder. 
*/ - create: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Create", - requestStream: false, - responseStream: false, - requestSerialize: (value: CreateClusterRequest) => - Buffer.from(CreateClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => CreateClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), - }, - /** Updates the specified Elasticsearch cluster. */ - update: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Update", - requestStream: false, - responseStream: false, - requestSerialize: (value: UpdateClusterRequest) => - Buffer.from(UpdateClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => UpdateClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), - }, - /** Deletes the specified Elasticsearch cluster. */ - delete: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Delete", - requestStream: false, - responseStream: false, - requestSerialize: (value: DeleteClusterRequest) => - Buffer.from(DeleteClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => DeleteClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), +const baseRestoreClusterRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest", + backupId: "", + name: "", + description: "", + environment: 0, + networkId: "", + securityGroupIds: "", + serviceAccountId: "", + deletionProtection: false, + folderId: "", +}; + +export const RestoreClusterRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest" as const, + + encode( + message: RestoreClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + RestoreClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(40).int32(message.environment); + } + if (message.configSpec !== undefined) { + ConfigSpec.encode(message.configSpec, writer.uint32(50).fork()).ldelim(); + } + for (const v of message.hostSpecs) { + HostSpec.encode(v!, writer.uint32(74).fork()).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(82).string(message.networkId); + } + for (const v of message.securityGroupIds) { + writer.uint32(90).string(v!); + } + if (message.serviceAccountId !== "") { + writer.uint32(98).string(message.serviceAccountId); + } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } + if (message.folderId !== "") { + writer.uint32(114).string(message.folderId); + } + for (const v of message.extensionSpecs) { + ExtensionSpec.encode(v!, writer.uint32(122).fork()).ldelim(); + } + return writer; }, - /** Moves the specified Elasticsearch cluster to the specified 
folder. */ - move: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Move", - requestStream: false, - responseStream: false, - requestSerialize: (value: MoveClusterRequest) => - Buffer.from(MoveClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => MoveClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.labels = {}; + message.hostSpecs = []; + message.securityGroupIds = []; + message.extensionSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = RestoreClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.environment = reader.int32() as any; + break; + case 6: + message.configSpec = ConfigSpec.decode(reader, reader.uint32()); + break; + case 9: + message.hostSpecs.push(HostSpec.decode(reader, reader.uint32())); + break; + case 10: + message.networkId = reader.string(); + break; + case 11: + message.securityGroupIds.push(reader.string()); + break; + case 12: + message.serviceAccountId = reader.string(); + break; + case 13: + message.deletionProtection = reader.bool(); + break; + case 14: + message.folderId = reader.string(); + break; + case 15: + message.extensionSpecs.push( + ExtensionSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; }, - /** Starts the specified Elasticsearch cluster. */ - start: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Start", - requestStream: false, - responseStream: false, - requestSerialize: (value: StartClusterRequest) => - Buffer.from(StartClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => StartClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), + + fromJSON(object: any): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? 
ConfigSpec.fromJSON(object.configSpec) + : undefined; + message.hostSpecs = (object.hostSpecs ?? []).map((e: any) => + HostSpec.fromJSON(e) + ); + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.extensionSpecs = (object.extensionSpecs ?? []).map((e: any) => + ExtensionSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: RestoreClusterRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? ConfigSpec.toJSON(message.configSpec) + : undefined); + if (message.hostSpecs) { + obj.hostSpecs = message.hostSpecs.map((e) => + e ? HostSpec.toJSON(e) : undefined + ); + } else { + obj.hostSpecs = []; + } + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + message.folderId !== undefined && (obj.folderId = message.folderId); + if (message.extensionSpecs) { + obj.extensionSpecs = message.extensionSpecs.map((e) => + e ? ExtensionSpec.toJSON(e) : undefined + ); + } else { + obj.extensionSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = object.backupId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromPartial(object.configSpec) + : undefined; + message.hostSpecs = + object.hostSpecs?.map((e) => HostSpec.fromPartial(e)) || []; + message.networkId = object.networkId ?? ""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.deletionProtection = object.deletionProtection ?? false; + message.folderId = object.folderId ?? 
""; + message.extensionSpecs = + object.extensionSpecs?.map((e) => ExtensionSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterRequest.$type, RestoreClusterRequest); + +const baseRestoreClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const RestoreClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry" as const, + + encode( + message: RestoreClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: RestoreClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + RestoreClusterRequest_LabelsEntry.$type, + RestoreClusterRequest_LabelsEntry +); + +const baseRestoreClusterMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterMetadata", + clusterId: "", + backupId: "", +}; + +export const RestoreClusterMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterMetadata" as const, + + encode( + message: RestoreClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: RestoreClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterMetadata.$type, RestoreClusterMetadata); + +const baseBackupClusterRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterRequest", + clusterId: "", +}; + +export const BackupClusterRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterRequest" as const, + + encode( + message: BackupClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackupClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupClusterRequest { + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: BackupClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupClusterRequest { + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + message.clusterId = object.clusterId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(BackupClusterRequest.$type, BackupClusterRequest); + +const baseBackupClusterMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterMetadata", + clusterId: "", +}; + +export const BackupClusterMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterMetadata" as const, + + encode( + message: BackupClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackupClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupClusterMetadata { + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: BackupClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupClusterMetadata { + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(BackupClusterMetadata.$type, BackupClusterMetadata); + +const baseListClusterBackupsRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterBackupsRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsRequest" as const, + + encode( + message: ListClusterBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsRequest.$type, + ListClusterBackupsRequest +); + +const baseListClusterBackupsResponse: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsResponse", + nextPageToken: "", +}; + +export const ListClusterBackupsResponse = { + $type: + "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsResponse" as const, + + encode( + message: ListClusterBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsResponse.$type, + ListClusterBackupsResponse +); + +/** A set of methods for managing Elasticsearch clusters. 
*/ +export const ClusterServiceService = { + /** + * Returns the specified Elasticsearch cluster. + * + * To get the list of available Elasticsearch clusters, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetClusterRequest) => + Buffer.from(GetClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), + responseSerialize: (value: Cluster) => + Buffer.from(Cluster.encode(value).finish()), + responseDeserialize: (value: Buffer) => Cluster.decode(value), + }, + /** Retrieves the list of Elasticsearch clusters that belong to the specified folder. */ + list: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClustersRequest) => + Buffer.from(ListClustersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), + responseSerialize: (value: ListClustersResponse) => + Buffer.from(ListClustersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), + }, + /** Creates a new Elasticsearch cluster in the specified folder. */ + create: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateClusterRequest) => + Buffer.from(CreateClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified Elasticsearch cluster. */ + update: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterRequest) => + Buffer.from(UpdateClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified Elasticsearch cluster. */ + delete: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteClusterRequest) => + Buffer.from(DeleteClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Moves the specified Elasticsearch cluster to the specified folder. */ + move: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveClusterRequest) => + Buffer.from(MoveClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Starts the specified Elasticsearch cluster. 
*/ + start: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Start", + requestStream: false, + responseStream: false, + requestSerialize: (value: StartClusterRequest) => + Buffer.from(StartClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StartClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), }, /** Stops the specified Elasticsearch cluster. */ stop: { @@ -4737,6 +5619,44 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Create a backup for the specified ElasticSearch cluster. */ + backup: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Backup", + requestStream: false, + responseStream: false, + requestSerialize: (value: BackupClusterRequest) => + Buffer.from(BackupClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => BackupClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + listBackups: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/ListBackups", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterBackupsRequest) => + Buffer.from(ListClusterBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterBackupsRequest.decode(value), + responseSerialize: (value: ListClusterBackupsResponse) => + Buffer.from(ListClusterBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterBackupsResponse.decode(value), + }, + /** Creates a new ElasticSearch cluster from the specified backup. */ + restore: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Restore", + requestStream: false, + responseStream: false, + requestSerialize: (value: RestoreClusterRequest) => + Buffer.from(RestoreClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RestoreClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** * Retrieves logs for the specified Elasticsearch cluster. * @@ -4856,6 +5776,15 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { start: handleUnaryCall; /** Stops the specified Elasticsearch cluster. */ stop: handleUnaryCall; + /** Create a backup for the specified ElasticSearch cluster. */ + backup: handleUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + listBackups: handleUnaryCall< + ListClusterBackupsRequest, + ListClusterBackupsResponse + >; + /** Creates a new ElasticSearch cluster from the specified backup. */ + restore: handleUnaryCall; /** * Retrieves logs for the specified Elasticsearch cluster. * @@ -5027,6 +5956,63 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Create a backup for the specified ElasticSearch cluster. 
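The Backup, ListBackups and Restore methods added in this hunk are plain unary calls, so they can be exercised with the generated ClusterServiceClient and @grpc/grpc-js directly. A hedged sketch only: the endpoint, the auth header, the cluster ID, and the assumption that BackupClusterRequest needs nothing beyond clusterId are all illustrative, and in practice the client would come from the SDK's own session/endpoint machinery:

```ts
import { ChannelCredentials, Metadata } from "@grpc/grpc-js";
// Illustrative relative imports from the generated module patched above.
import {
  ClusterServiceClient,
  BackupClusterRequest,
  ListClusterBackupsRequest,
} from "./cluster_service";

const client = new ClusterServiceClient(
  "mdb.api.cloud.yandex.net:443", // assumed endpoint
  ChannelCredentials.createSsl()
);

const metadata = new Metadata();
metadata.set("authorization", `Bearer ${process.env.YC_IAM_TOKEN ?? ""}`); // assumed auth scheme

// Kick off a backup; the response is a long-running Operation.
client.backup(
  BackupClusterRequest.fromPartial({ clusterId: "c9qabcdef" }), // hypothetical ID
  metadata,
  (err, operation) => {
    if (err) throw err;
    console.log("backup operation started:", operation.id);
  }
);

// List existing backups for the same cluster.
client.listBackups(
  ListClusterBackupsRequest.fromPartial({ clusterId: "c9qabcdef", pageSize: 100 }),
  metadata,
  (err, response) => {
    if (err) throw err;
    console.log(`got ${response.backups.length} backups`);
  }
);
```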
*/ + backup( + request: BackupClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + backup( + request: BackupClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + backup( + request: BackupClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + listBackups( + request: ListClusterBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + /** Creates a new ElasticSearch cluster from the specified backup. */ + restore( + request: RestoreClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** * Retrieves logs for the specified Elasticsearch cluster. * diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts new file mode 100644 index 00000000..88c90fc8 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts @@ -0,0 +1,281 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface Extension { + $type: "yandex.cloud.mdb.elasticsearch.v1.Extension"; + /** Name of the extension. */ + name: string; + /** Extension unique ID */ + id: string; + /** ID of the Elasticsearch cluster the extension belongs to. */ + clusterId: string; + /** Extension version */ + version: number; + /** Flag is extension active now */ + active: boolean; +} + +export interface ExtensionSpec { + $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec"; + /** Name of the extension. */ + name: string; + /** + * URI of the zip arhive to create the new extension from. + * Currently only supports links that are stored in Yandex Object Storage. 
+ */ + uri: string; + disabled: boolean; +} + +const baseExtension: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Extension", + name: "", + id: "", + clusterId: "", + version: 0, + active: false, +}; + +export const Extension = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Extension" as const, + + encode( + message: Extension, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.id !== "") { + writer.uint32(18).string(message.id); + } + if (message.clusterId !== "") { + writer.uint32(26).string(message.clusterId); + } + if (message.version !== 0) { + writer.uint32(32).int64(message.version); + } + if (message.active === true) { + writer.uint32(40).bool(message.active); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Extension { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExtension } as Extension; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.id = reader.string(); + break; + case 3: + message.clusterId = reader.string(); + break; + case 4: + message.version = longToNumber(reader.int64() as Long); + break; + case 5: + message.active = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Extension { + const message = { ...baseExtension } as Extension; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.version = + object.version !== undefined && object.version !== null + ? Number(object.version) + : 0; + message.active = + object.active !== undefined && object.active !== null + ? Boolean(object.active) + : false; + return message; + }, + + toJSON(message: Extension): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.id !== undefined && (obj.id = message.id); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.version !== undefined && + (obj.version = Math.round(message.version)); + message.active !== undefined && (obj.active = message.active); + return obj; + }, + + fromPartial, I>>( + object: I + ): Extension { + const message = { ...baseExtension } as Extension; + message.name = object.name ?? ""; + message.id = object.id ?? ""; + message.clusterId = object.clusterId ?? ""; + message.version = object.version ?? 0; + message.active = object.active ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(Extension.$type, Extension); + +const baseExtensionSpec: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec", + name: "", + uri: "", + disabled: false, +}; + +export const ExtensionSpec = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec" as const, + + encode( + message: ExtensionSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.uri !== "") { + writer.uint32(18).string(message.uri); + } + if (message.disabled === true) { + writer.uint32(24).bool(message.disabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExtensionSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExtensionSpec } as ExtensionSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.uri = reader.string(); + break; + case 3: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExtensionSpec { + const message = { ...baseExtensionSpec } as ExtensionSpec; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.uri = + object.uri !== undefined && object.uri !== null ? String(object.uri) : ""; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: ExtensionSpec): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.uri !== undefined && (obj.uri = message.uri); + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExtensionSpec { + const message = { ...baseExtensionSpec } as ExtensionSpec; + message.name = object.name ?? ""; + message.uri = object.uri ?? ""; + message.disabled = object.disabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(ExtensionSpec.$type, ExtensionSpec); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
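The DeepPartial/Exact pair generated above is what lets fromPartial accept loosely shaped input while still rejecting unknown keys at compile time, and longToNumber is why the int64 `version` field surfaces as a plain JS number. A small sketch with invented values:

```ts
import { Extension } from "./extension"; // illustrative path

// Every field is optional here; $type and the remaining defaults are filled in.
// An extra property (e.g. `colour: "red"`) would fail to compile, because Exact
// maps keys outside the message shape to `never`.
const ext = Extension.fromPartial({
  name: "analysis-icu",   // hypothetical extension name
  clusterId: "c9qabcdef", // hypothetical cluster ID
  version: 3,
});

// version travels as int64 on the wire but is converted back with longToNumber,
// which throws once the value no longer fits in Number.MAX_SAFE_INTEGER.
const copy = Extension.decode(Extension.encode(ext).finish());
console.log(copy.version); // 3
```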
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts new file mode 100644 index 00000000..5cdf27a1 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts @@ -0,0 +1,1127 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Extension } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/extension"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface GetExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to return. */ + extensionId: string; +} + +export interface ListExtensionsRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest"; + /** Required. ID of the cluster to list extensions in. */ + clusterId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListExtensionsResponse { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsResponse"; + /** Requested list of extensions. */ + extensions: Extension[]; + /** + * This token allows you to get the next page of results for ListBackups requests, + * if the number of results is larger than `page_size` specified in the request. + * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups + * requests will have their own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface DeleteExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to delete. */ + extensionId: string; +} + +export interface DeleteExtensionMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to delete. */ + extensionId: string; +} + +export interface UpdateExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest"; + /** Required. 
ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to delete. */ + extensionId: string; + active: boolean; +} + +export interface UpdateExtensionMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension. */ + extensionId: string; +} + +export interface CreateExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Name of the extension. */ + name: string; + /** + * URI of the zip arhive to create the new extension from. + * Currently only supports links that are stored in Yandex Object Storage. + */ + uri: string; + disabled: boolean; +} + +export interface CreateExtensionMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension. */ + extensionId: string; +} + +const baseGetExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest", + clusterId: "", + extensionId: "", +}; + +export const GetExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest" as const, + + encode( + message: GetExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetExtensionRequest } as GetExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetExtensionRequest { + const message = { ...baseGetExtensionRequest } as GetExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: GetExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetExtensionRequest { + const message = { ...baseGetExtensionRequest } as GetExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetExtensionRequest.$type, GetExtensionRequest); + +const baseListExtensionsRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListExtensionsRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest" as const, + + encode( + message: ListExtensionsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListExtensionsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListExtensionsRequest } as ListExtensionsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListExtensionsRequest { + const message = { ...baseListExtensionsRequest } as ListExtensionsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListExtensionsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListExtensionsRequest { + const message = { ...baseListExtensionsRequest } as ListExtensionsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListExtensionsRequest.$type, ListExtensionsRequest); + +const baseListExtensionsResponse: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsResponse", + nextPageToken: "", +}; + +export const ListExtensionsResponse = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsResponse" as const, + + encode( + message: ListExtensionsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.extensions) { + Extension.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListExtensionsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListExtensionsResponse } as ListExtensionsResponse; + message.extensions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.extensions.push(Extension.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListExtensionsResponse { + const message = { ...baseListExtensionsResponse } as ListExtensionsResponse; + message.extensions = (object.extensions ?? []).map((e: any) => + Extension.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListExtensionsResponse): unknown { + const obj: any = {}; + if (message.extensions) { + obj.extensions = message.extensions.map((e) => + e ? Extension.toJSON(e) : undefined + ); + } else { + obj.extensions = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListExtensionsResponse { + const message = { ...baseListExtensionsResponse } as ListExtensionsResponse; + message.extensions = + object.extensions?.map((e) => Extension.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListExtensionsResponse.$type, ListExtensionsResponse); + +const baseDeleteExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest", + clusterId: "", + extensionId: "", +}; + +export const DeleteExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest" as const, + + encode( + message: DeleteExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteExtensionRequest } as DeleteExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteExtensionRequest { + const message = { ...baseDeleteExtensionRequest } as DeleteExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? 
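ListExtensionsRequest/Response above follow the usual page_size / page_token contract, so collecting every extension is a loop on nextPageToken. A sketch that promisifies the callback client; the endpoint is an assumption and auth metadata is left out for brevity:

```ts
import { promisify } from "util";
import { ChannelCredentials } from "@grpc/grpc-js";
import {
  ExtensionServiceClient,
  ListExtensionsRequest,
  ListExtensionsResponse,
} from "./extension_service"; // illustrative path
import { Extension } from "./extension";

const client = new ExtensionServiceClient(
  "mdb.api.cloud.yandex.net:443", // assumed endpoint; auth metadata omitted here
  ChannelCredentials.createSsl()
);

// promisify picks up the (request, callback) overload at runtime.
const list = promisify(client.list.bind(client)) as (
  request: ListExtensionsRequest
) => Promise<ListExtensionsResponse>;

async function listAllExtensions(clusterId: string): Promise<Extension[]> {
  const extensions: Extension[] = [];
  let pageToken = "";
  do {
    const page = await list(
      ListExtensionsRequest.fromPartial({ clusterId, pageSize: 100, pageToken })
    );
    extensions.push(...page.extensions);
    pageToken = page.nextPageToken;
  } while (pageToken !== "");
  return extensions;
}
```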
String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: DeleteExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteExtensionRequest { + const message = { ...baseDeleteExtensionRequest } as DeleteExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteExtensionRequest.$type, DeleteExtensionRequest); + +const baseDeleteExtensionMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata", + clusterId: "", + extensionId: "", +}; + +export const DeleteExtensionMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata" as const, + + encode( + message: DeleteExtensionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteExtensionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteExtensionMetadata, + } as DeleteExtensionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteExtensionMetadata { + const message = { + ...baseDeleteExtensionMetadata, + } as DeleteExtensionMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: DeleteExtensionMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteExtensionMetadata { + const message = { + ...baseDeleteExtensionMetadata, + } as DeleteExtensionMetadata; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? 
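Each companion also registers itself in messageTypeRegistry under its full proto name. One way this can be used, sketched under the assumption that the registry behaves like a Map keyed by $type and that packed payloads arrive as google.protobuf.Any-style { typeUrl, value } pairs, is to decode an Operation's metadata or response without knowing the concrete type up front:

```ts
import { messageTypeRegistry } from "../../../../../typeRegistry"; // path as used above

// Resolve "type.googleapis.com/<full.name>" to the generated companion and decode.
function unpack(packed: { typeUrl: string; value: Uint8Array }) {
  const typeName = packed.typeUrl.replace("type.googleapis.com/", "");
  const companion = messageTypeRegistry.get(typeName);
  if (!companion) {
    throw new Error(`No generated type registered for ${typeName}`);
  }
  return companion.decode(packed.value);
}
```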
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteExtensionMetadata.$type, DeleteExtensionMetadata); + +const baseUpdateExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest", + clusterId: "", + extensionId: "", + active: false, +}; + +export const UpdateExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest" as const, + + encode( + message: UpdateExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + if (message.active === true) { + writer.uint32(24).bool(message.active); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateExtensionRequest } as UpdateExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + case 3: + message.active = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateExtensionRequest { + const message = { ...baseUpdateExtensionRequest } as UpdateExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + message.active = + object.active !== undefined && object.active !== null + ? Boolean(object.active) + : false; + return message; + }, + + toJSON(message: UpdateExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + message.active !== undefined && (obj.active = message.active); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateExtensionRequest { + const message = { ...baseUpdateExtensionRequest } as UpdateExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + message.active = object.active ?? false; + return message; + }, +}; + +messageTypeRegistry.set(UpdateExtensionRequest.$type, UpdateExtensionRequest); + +const baseUpdateExtensionMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata", + clusterId: "", + extensionId: "", +}; + +export const UpdateExtensionMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata" as const, + + encode( + message: UpdateExtensionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateExtensionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateExtensionMetadata, + } as UpdateExtensionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateExtensionMetadata { + const message = { + ...baseUpdateExtensionMetadata, + } as UpdateExtensionMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: UpdateExtensionMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateExtensionMetadata { + const message = { + ...baseUpdateExtensionMetadata, + } as UpdateExtensionMetadata; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateExtensionMetadata.$type, UpdateExtensionMetadata); + +const baseCreateExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest", + clusterId: "", + name: "", + uri: "", + disabled: false, +}; + +export const CreateExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest" as const, + + encode( + message: CreateExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.uri !== "") { + writer.uint32(26).string(message.uri); + } + if (message.disabled === true) { + writer.uint32(32).bool(message.disabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateExtensionRequest } as CreateExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.uri = reader.string(); + break; + case 4: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateExtensionRequest { + const message = { ...baseCreateExtensionRequest } as CreateExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.uri = + object.uri !== undefined && object.uri !== null ? String(object.uri) : ""; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? 
Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: CreateExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + message.uri !== undefined && (obj.uri = message.uri); + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateExtensionRequest { + const message = { ...baseCreateExtensionRequest } as CreateExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + message.uri = object.uri ?? ""; + message.disabled = object.disabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(CreateExtensionRequest.$type, CreateExtensionRequest); + +const baseCreateExtensionMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata", + clusterId: "", + extensionId: "", +}; + +export const CreateExtensionMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata" as const, + + encode( + message: CreateExtensionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateExtensionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateExtensionMetadata, + } as CreateExtensionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateExtensionMetadata { + const message = { + ...baseCreateExtensionMetadata, + } as CreateExtensionMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: CreateExtensionMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateExtensionMetadata { + const message = { + ...baseCreateExtensionMetadata, + } as CreateExtensionMetadata; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateExtensionMetadata.$type, CreateExtensionMetadata); + +export const ExtensionServiceService = { + /** Returns the specified extension of Elasticsearch cluster. 
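Per the field comments above, creating an extension means pointing `uri` at a zip archive in Yandex Object Storage; the Create RPC then returns an Operation whose metadata is CreateExtensionMetadata. A minimal request sketch with made-up identifiers:

```ts
import { CreateExtensionRequest } from "./extension_service"; // illustrative path

const request = CreateExtensionRequest.fromPartial({
  clusterId: "c9qabcdef",                                    // hypothetical cluster ID
  name: "my-synonyms",                                       // hypothetical extension name
  uri: "https://storage.yandexcloud.net/my-bucket/ext.zip",  // hypothetical object URL
  disabled: false,
});
// client.create(request, metadata, callback) yields an Operation; its metadata
// decodes as CreateExtensionMetadata (clusterId plus the new extensionId).
```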
*/ + get: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetExtensionRequest) => + Buffer.from(GetExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetExtensionRequest.decode(value), + responseSerialize: (value: Extension) => + Buffer.from(Extension.encode(value).finish()), + responseDeserialize: (value: Buffer) => Extension.decode(value), + }, + /** Returns the list of available extensions for the specified Elasticsearch cluster. */ + list: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListExtensionsRequest) => + Buffer.from(ListExtensionsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListExtensionsRequest.decode(value), + responseSerialize: (value: ListExtensionsResponse) => + Buffer.from(ListExtensionsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListExtensionsResponse.decode(value), + }, + /** Creates new extension version. */ + create: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateExtensionRequest) => + Buffer.from(CreateExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateExtensionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified extension. */ + update: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateExtensionRequest) => + Buffer.from(UpdateExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateExtensionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified extension. */ + delete: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteExtensionRequest) => + Buffer.from(DeleteExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteExtensionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ExtensionServiceServer extends UntypedServiceImplementation { + /** Returns the specified extension of Elasticsearch cluster. */ + get: handleUnaryCall; + /** Returns the list of available extensions for the specified Elasticsearch cluster. */ + list: handleUnaryCall; + /** Creates new extension version. */ + create: handleUnaryCall; + /** Updates the specified extension. */ + update: handleUnaryCall; + /** Deletes the specified extension. */ + delete: handleUnaryCall; +} + +export interface ExtensionServiceClient extends Client { + /** Returns the specified extension of Elasticsearch cluster. 
*/ + get( + request: GetExtensionRequest, + callback: (error: ServiceError | null, response: Extension) => void + ): ClientUnaryCall; + get( + request: GetExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Extension) => void + ): ClientUnaryCall; + get( + request: GetExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Extension) => void + ): ClientUnaryCall; + /** Returns the list of available extensions for the specified Elasticsearch cluster. */ + list( + request: ListExtensionsRequest, + callback: ( + error: ServiceError | null, + response: ListExtensionsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListExtensionsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListExtensionsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListExtensionsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListExtensionsResponse + ) => void + ): ClientUnaryCall; + /** Creates new extension version. */ + create( + request: CreateExtensionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified extension. */ + update( + request: UpdateExtensionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified extension. */ + delete( + request: DeleteExtensionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ExtensionServiceClient = makeGenericClientConstructor( + ExtensionServiceService, + "yandex.cloud.mdb.elasticsearch.v1.ExtensionService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ExtensionServiceClient; + service: typeof ExtensionServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? 
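The ExtensionServiceService definition and ExtensionServiceServer interface above are also enough to stand up a local stub with @grpc/grpc-js, which can be handy in tests. A hedged sketch that only answers Get meaningfully and reports the other methods as unimplemented:

```ts
import { Server, ServerCredentials, status } from "@grpc/grpc-js";
import {
  ExtensionServiceService,
  ExtensionServiceServer,
} from "./extension_service"; // illustrative path
import { Extension } from "./extension";

// Every method needs a handler; this stub fakes Get and rejects the rest.
const unimplemented = (_call: any, callback: any) =>
  callback({ code: status.UNIMPLEMENTED, details: "not implemented in this stub" });

const stub: ExtensionServiceServer = {
  get: (call, callback) => {
    callback(
      null,
      Extension.fromPartial({
        id: call.request.extensionId,
        clusterId: call.request.clusterId,
        name: "fake-extension", // hypothetical
        active: true,
      })
    );
  },
  list: unimplemented,
  create: unimplemented,
  update: unimplemented,
  delete: unimplemented,
};

const server = new Server();
server.addService(ExtensionServiceService, stub);
server.bindAsync("127.0.0.1:50051", ServerCredentials.createInsecure(), () =>
  server.start()
);
```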
Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts new file mode 100644 index 00000000..e6169ecf --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts @@ -0,0 +1,232 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface Backup { + $type: "yandex.cloud.mdb.greenplum.v1.Backup"; + /** Required. ID of the backup. */ + id: string; + /** ID of the folder that the backup belongs to. */ + folderId: string; + /** + * Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + * (i.e. when the backup operation was completed). + */ + createdAt?: Date; + /** ID of the PostgreSQL cluster that the backup was created for. */ + sourceClusterId: string; + /** Time when the backup operation was started. */ + startedAt?: Date; + /** Size of backup in bytes */ + size: number; +} + +const baseBackup: object = { + $type: "yandex.cloud.mdb.greenplum.v1.Backup", + id: "", + folderId: "", + sourceClusterId: "", + size: 0, +}; + +export const Backup = { + $type: "yandex.cloud.mdb.greenplum.v1.Backup" as const, + + encode( + message: Backup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.sourceClusterId !== "") { + writer.uint32(34).string(message.sourceClusterId); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Backup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBackup } as Backup; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.sourceClusterId = reader.string(); + break; + case 5: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup { + const message = { ...baseBackup } as Backup; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.sourceClusterId = + object.sourceClusterId !== undefined && object.sourceClusterId !== null + ? String(object.sourceClusterId) + : ""; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + return message; + }, + + toJSON(message: Backup): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.sourceClusterId !== undefined && + (obj.sourceClusterId = message.sourceClusterId); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.size !== undefined && (obj.size = Math.round(message.size)); + return obj; + }, + + fromPartial, I>>(object: I): Backup { + const message = { ...baseBackup } as Backup; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.sourceClusterId = object.sourceClusterId ?? ""; + message.startedAt = object.startedAt ?? undefined; + message.size = object.size ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Backup.$type, Backup); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
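For the Backup message above, toJSON emits createdAt/startedAt as RFC 3339 strings and fromJSON accepts either a Date or such a string, while the int64 `size` field comes back as a plain number. A small round-trip sketch with invented values:

```ts
import { Backup } from "./backup"; // illustrative path

const backup = Backup.fromJSON({
  id: "gp-backup-1",                     // hypothetical values throughout
  folderId: "b1gabcdef",
  sourceClusterId: "c9qabcdef",
  createdAt: "2022-02-16T14:00:00.000Z", // parsed via fromJsonTimestamp
  size: 1073741824,
});

const json = Backup.toJSON(backup) as { createdAt?: string; size?: number };
console.log(json.createdAt); // "2022-02-16T14:00:00.000Z"
console.log(json.size);      // 1073741824 (int64 on the wire, number in JS)
```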
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts new file mode 100644 index 00000000..769482a2 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts @@ -0,0 +1,429 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Backup } from "../../../../../yandex/cloud/mdb/greenplum/v1/backup"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface GetBackupRequest { + $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest"; + /** Required. ID of the backup to return. */ + backupId: string; +} + +export interface ListBackupsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest"; + /** Required. ID of the folder to list backups in. */ + folderId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListBackupsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for ListBackups requests, + * if the number of results is larger than `page_size` specified in the request. + * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups + * requests will have their own `next_page_token` to continue paging through the results. 
+ */ + nextPageToken: string; +} + +const baseGetBackupRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest", + backupId: "", +}; + +export const GetBackupRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest" as const, + + encode( + message: GetBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBackupRequest } as GetBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: GetBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetBackupRequest.$type, GetBackupRequest); + +const baseListBackupsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListBackupsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest" as const, + + encode( + message: ListBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsRequest.$type, ListBackupsRequest); + +const baseListBackupsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsResponse", + nextPageToken: "", +}; + +export const ListBackupsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsResponse" as const, + + encode( + message: ListBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); + +export const BackupServiceService = { + /** Returns the specified backup of Greenplum® cluster. 
*/ + get: { + path: "/yandex.cloud.mdb.greenplum.v1.BackupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBackupRequest) => + Buffer.from(GetBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBackupRequest.decode(value), + responseSerialize: (value: Backup) => + Buffer.from(Backup.encode(value).finish()), + responseDeserialize: (value: Buffer) => Backup.decode(value), + }, + /** Returns the list of available backups for the specified Greenplum® cluster. */ + list: { + path: "/yandex.cloud.mdb.greenplum.v1.BackupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBackupsRequest) => + Buffer.from(ListBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBackupsRequest.decode(value), + responseSerialize: (value: ListBackupsResponse) => + Buffer.from(ListBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), + }, +} as const; + +export interface BackupServiceServer extends UntypedServiceImplementation { + /** Returns the specified backup of Greenplum® cluster. */ + get: handleUnaryCall; + /** Returns the list of available backups for the specified Greenplum® cluster. */ + list: handleUnaryCall; +} + +export interface BackupServiceClient extends Client { + /** Returns the specified backup of Greenplum® cluster. */ + get( + request: GetBackupRequest, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified Greenplum® cluster. */ + list( + request: ListBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; +} + +export const BackupServiceClient = makeGenericClientConstructor( + BackupServiceService, + "yandex.cloud.mdb.greenplum.v1.BackupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BackupServiceClient; + service: typeof BackupServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? 
keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts index 1bda2522..664dca58 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts @@ -5,6 +5,9 @@ import _m0 from "protobufjs/minimal"; import { MasterSubclusterConfig, SegmentSubclusterConfig, + ConnectionPoolerConfigSet, + Greenplumconfigset617, + Greenplumconfigset619, } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; import { MaintenanceWindow, @@ -15,44 +18,44 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; -/** A Greenplum Cluster resource. For more information, see the */ +/** A Greenplum® cluster resource. */ export interface Cluster { $type: "yandex.cloud.mdb.greenplum.v1.Cluster"; /** - * ID of the Greenplum cluster. - * This ID is assigned by MDB at creation time. + * ID of the Greenplum® cluster. + * This ID is assigned by Yandex Cloud at the time of cluster creation. */ id: string; - /** ID of the folder that the Greenplum cluster belongs to. */ + /** ID of the folder that the Greenplum® cluster belongs to. */ folderId: string; - /** Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Cluster creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ createdAt?: Date; /** - * Name of the Greenplum cluster. - * The name is unique within the folder. 1-63 characters long. + * Name of the Greenplum® cluster. + * The name is unique within the folder and is 1-63 characters long. */ name: string; - /** Greenplum cluster config */ + /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Description of the Greenplum cluster. 0-256 characters long. */ + /** Description of the Greenplum® cluster. 0-256 characters long. */ description: string; - /** Custom labels for the Greenplum cluster as `key:value` pairs. Maximum 64 per resource. */ + /** Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 labels per resource. */ labels: { [key: string]: string }; - /** Deployment environment of the Greenplum cluster. */ + /** Deployment environment of the Greenplum® cluster. */ environment: Cluster_Environment; - /** Description of monitoring systems relevant to the Greenplum cluster. */ + /** Description of monitoring systems relevant to the Greenplum® cluster. */ monitoring: Monitoring[]; - /** Configuration of the Greenplum master subcluster. */ + /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfig; - /** Configuration of the Greenplum segment subcluster. */ + /** Configuration of the Greenplum® segment subcluster. */ segmentConfig?: SegmentSubclusterConfig; - /** Number of hosts of the master subcluster */ + /** Number of hosts in the master subcluster. */ masterHostCount: number; - /** Number of hosts of the segment subcluster */ + /** Number of hosts in the segment subcluster. 
*/ segmentHostCount: number; - /** Number of segments in the host */ + /** Number of segments per host. */ segmentInHost: number; - /** ID of the network that the cluster belongs to. */ + /** ID of the cloud network that the cluster belongs to. */ networkId: string; /** Aggregated cluster health. */ health: Cluster_Health; @@ -60,19 +63,20 @@ export interface Cluster { status: Cluster_Status; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; - /** Maintenance operation planned at nearest maintenance_window. */ + /** Maintenance operation planned at nearest [maintenance_window]. */ plannedOperation?: MaintenanceOperation; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Owner user name */ + /** Owner user name. */ userName: string; - /** Deletion Protection inhibits deletion of the cluster */ + /** Whether or not cluster is protected from being deleted. */ deletionProtection: boolean; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; + /** Greenplum and Odyssey configuration; */ + clusterConfig?: ClusterConfigSet; } -/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -120,9 +124,9 @@ export function cluster_EnvironmentToJSON(object: Cluster_Environment): string { } export enum Cluster_Health { - /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + /** HEALTH_UNKNOWN - Health of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). */ + /** ALIVE - Cluster is working normally ([Host.health] for every host in the cluster is ALIVE). */ ALIVE = 1, /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ DEAD = 2, @@ -174,6 +178,7 @@ export function cluster_HealthToJSON(object: Cluster_Health): string { } } +/** Current state of the cluster. */ export enum Cluster_Status { /** STATUS_UNKNOWN - Cluster state is unknown. */ STATUS_UNKNOWN = 0, @@ -181,13 +186,13 @@ export enum Cluster_Status { CREATING = 1, /** RUNNING - Cluster is running normally. */ RUNNING = 2, - /** ERROR - Cluster encountered a problem and cannot operate. */ + /** ERROR - Cluster has encountered a problem and cannot operate. */ ERROR = 3, /** UPDATING - Cluster is being updated. */ UPDATING = 4, /** STOPPING - Cluster is stopping. */ STOPPING = 5, - /** STOPPED - Cluster stopped. */ + /** STOPPED - Cluster has stopped. */ STOPPED = 6, /** STARTING - Cluster is starting. */ STARTING = 7, @@ -256,6 +261,14 @@ export interface Cluster_LabelsEntry { value: string; } +export interface ClusterConfigSet { + $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet"; + greenplumConfigSet617?: Greenplumconfigset617 | undefined; + greenplumConfigSet619?: Greenplumconfigset619 | undefined; + /** Odyssey pool settings */ + pool?: ConnectionPoolerConfigSet; +} + /** Monitoring system metadata. */ export interface Monitoring { $type: "yandex.cloud.mdb.greenplum.v1.Monitoring"; @@ -263,18 +276,54 @@ export interface Monitoring { name: string; /** Description of the monitoring system. */ description: string; - /** Link to the monitoring system charts for the Greenplum cluster. */ + /** Link to the monitoring system charts for the Greenplum® cluster. */ link: string; } +/** Greenplum® cluster configuration. 
*/ export interface GreenplumConfig { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig"; - /** Version of the Greenplum server software. */ + /** Version of the Greenplum® server software. */ version: string; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Access policy for external services. */ access?: Access; + /** + * ID of the availability zone the cluster belongs to. + * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + */ + zoneId: string; + /** + * ID of the subnet the cluster belongs to. This subnet should be a part + * of the cloud network the cluster belongs to (see [Cluster.network_id]). + */ + subnetId: string; + /** + * Whether or not the cluster has a public IP address. + * + * After the cluster has been created, this setting cannot be changed. + */ + assignPublicIp: boolean; +} + +/** Greenplum® cluster access options. */ +export interface Access { + $type: "yandex.cloud.mdb.greenplum.v1.Access"; + /** Allows data export from the cluster to Yandex DataLens. */ + dataLens: boolean; + /** Allows SQL queries to the cluster databases from the Yandex Cloud management console. */ + webSql: boolean; + /** Allow access for DataTransfer. */ + dataTransfer: boolean; +} + +export interface GreenplumRestoreConfig { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumRestoreConfig"; + /** Time to start the daily backup, in the UTC timezone. */ + backupWindowStart?: TimeOfDay; + /** Access policy for external services. */ + access?: Access; /** * ID of the availability zone where the host resides. * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. @@ -299,12 +348,12 @@ export interface GreenplumConfig { assignPublicIp: boolean; } -export interface Access { - $type: "yandex.cloud.mdb.greenplum.v1.Access"; - /** Allow to export data from the cluster to Yandex DataLens. */ - dataLens: boolean; - /** Allow SQL queries to the cluster databases from the Yandex.Cloud management console. */ - webSql: boolean; +export interface RestoreResources { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources"; + /** ID of the preset for computational resources available to a host (CPU, memory etc.). */ + resourcePresetId: string; + /** Volume of the storage available to a host. */ + diskSize: number; } const baseCluster: object = { @@ -424,6 +473,12 @@ export const Cluster = { for (const v of message.hostGroupIds) { writer.uint32(186).string(v!); } + if (message.clusterConfig !== undefined) { + ClusterConfigSet.encode( + message.clusterConfig, + writer.uint32(194).fork() + ).ldelim(); + } return writer; }, @@ -524,6 +579,12 @@ export const Cluster = { case 23: message.hostGroupIds.push(reader.string()); break; + case 24: + message.clusterConfig = ClusterConfigSet.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -625,6 +686,10 @@ export const Cluster = { message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => String(e) ); + message.clusterConfig = + object.clusterConfig !== undefined && object.clusterConfig !== null + ? ClusterConfigSet.fromJSON(object.clusterConfig) + : undefined; return message; }, @@ -696,6 +761,10 @@ export const Cluster = { } else { obj.hostGroupIds = []; } + message.clusterConfig !== undefined && + (obj.clusterConfig = message.clusterConfig + ? 
ClusterConfigSet.toJSON(message.clusterConfig) + : undefined); return obj; }, @@ -748,6 +817,10 @@ export const Cluster = { message.userName = object.userName ?? ""; message.deletionProtection = object.deletionProtection ?? false; message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.clusterConfig = + object.clusterConfig !== undefined && object.clusterConfig !== null + ? ClusterConfigSet.fromPartial(object.clusterConfig) + : undefined; return message; }, }; @@ -827,6 +900,131 @@ export const Cluster_LabelsEntry = { messageTypeRegistry.set(Cluster_LabelsEntry.$type, Cluster_LabelsEntry); +const baseClusterConfigSet: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet", +}; + +export const ClusterConfigSet = { + $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet" as const, + + encode( + message: ClusterConfigSet, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.greenplumConfigSet617 !== undefined) { + Greenplumconfigset617.encode( + message.greenplumConfigSet617, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.greenplumConfigSet619 !== undefined) { + Greenplumconfigset619.encode( + message.greenplumConfigSet619, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.pool !== undefined) { + ConnectionPoolerConfigSet.encode( + message.pool, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClusterConfigSet { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClusterConfigSet } as ClusterConfigSet; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.greenplumConfigSet617 = Greenplumconfigset617.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.greenplumConfigSet619 = Greenplumconfigset619.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.pool = ConnectionPoolerConfigSet.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClusterConfigSet { + const message = { ...baseClusterConfigSet } as ClusterConfigSet; + message.greenplumConfigSet617 = + object.greenplumConfigSet_6_17 !== undefined && + object.greenplumConfigSet_6_17 !== null + ? Greenplumconfigset617.fromJSON(object.greenplumConfigSet_6_17) + : undefined; + message.greenplumConfigSet619 = + object.greenplumConfigSet_6_19 !== undefined && + object.greenplumConfigSet_6_19 !== null + ? Greenplumconfigset619.fromJSON(object.greenplumConfigSet_6_19) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfigSet.fromJSON(object.pool) + : undefined; + return message; + }, + + toJSON(message: ClusterConfigSet): unknown { + const obj: any = {}; + message.greenplumConfigSet617 !== undefined && + (obj.greenplumConfigSet_6_17 = message.greenplumConfigSet617 + ? Greenplumconfigset617.toJSON(message.greenplumConfigSet617) + : undefined); + message.greenplumConfigSet619 !== undefined && + (obj.greenplumConfigSet_6_19 = message.greenplumConfigSet619 + ? Greenplumconfigset619.toJSON(message.greenplumConfigSet619) + : undefined); + message.pool !== undefined && + (obj.pool = message.pool + ? 
ConnectionPoolerConfigSet.toJSON(message.pool) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClusterConfigSet { + const message = { ...baseClusterConfigSet } as ClusterConfigSet; + message.greenplumConfigSet617 = + object.greenplumConfigSet617 !== undefined && + object.greenplumConfigSet617 !== null + ? Greenplumconfigset617.fromPartial(object.greenplumConfigSet617) + : undefined; + message.greenplumConfigSet619 = + object.greenplumConfigSet619 !== undefined && + object.greenplumConfigSet619 !== null + ? Greenplumconfigset619.fromPartial(object.greenplumConfigSet619) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfigSet.fromPartial(object.pool) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ClusterConfigSet.$type, ClusterConfigSet); + const baseMonitoring: object = { $type: "yandex.cloud.mdb.greenplum.v1.Monitoring", name: "", @@ -1061,6 +1259,7 @@ const baseAccess: object = { $type: "yandex.cloud.mdb.greenplum.v1.Access", dataLens: false, webSql: false, + dataTransfer: false, }; export const Access = { @@ -1076,6 +1275,9 @@ export const Access = { if (message.webSql === true) { writer.uint32(16).bool(message.webSql); } + if (message.dataTransfer === true) { + writer.uint32(24).bool(message.dataTransfer); + } return writer; }, @@ -1092,6 +1294,9 @@ export const Access = { case 2: message.webSql = reader.bool(); break; + case 3: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1110,6 +1315,10 @@ export const Access = { object.webSql !== undefined && object.webSql !== null ? Boolean(object.webSql) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, @@ -1117,6 +1326,8 @@ export const Access = { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); message.webSql !== undefined && (obj.webSql = message.webSql); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, @@ -1124,12 +1335,221 @@ export const Access = { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; message.webSql = object.webSql ?? false; + message.dataTransfer = object.dataTransfer ?? false; return message; }, }; messageTypeRegistry.set(Access.$type, Access); +const baseGreenplumRestoreConfig: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumRestoreConfig", + zoneId: "", + subnetId: "", + assignPublicIp: false, +}; + +export const GreenplumRestoreConfig = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumRestoreConfig" as const, + + encode( + message: GreenplumRestoreConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupWindowStart !== undefined) { + TimeOfDay.encode( + message.backupWindowStart, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(18).fork()).ldelim(); + } + if (message.zoneId !== "") { + writer.uint32(26).string(message.zoneId); + } + if (message.subnetId !== "") { + writer.uint32(34).string(message.subnetId); + } + if (message.assignPublicIp === true) { + writer.uint32(40).bool(message.assignPublicIp); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GreenplumRestoreConfig { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumRestoreConfig } as GreenplumRestoreConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); + break; + case 2: + message.access = Access.decode(reader, reader.uint32()); + break; + case 3: + message.zoneId = reader.string(); + break; + case 4: + message.subnetId = reader.string(); + break; + case 5: + message.assignPublicIp = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GreenplumRestoreConfig { + const message = { ...baseGreenplumRestoreConfig } as GreenplumRestoreConfig; + message.backupWindowStart = + object.backupWindowStart !== undefined && + object.backupWindowStart !== null + ? TimeOfDay.fromJSON(object.backupWindowStart) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + return message; + }, + + toJSON(message: GreenplumRestoreConfig): unknown { + const obj: any = {}; + message.backupWindowStart !== undefined && + (obj.backupWindowStart = message.backupWindowStart + ? TimeOfDay.toJSON(message.backupWindowStart) + : undefined); + message.access !== undefined && + (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): GreenplumRestoreConfig { + const message = { ...baseGreenplumRestoreConfig } as GreenplumRestoreConfig; + message.backupWindowStart = + object.backupWindowStart !== undefined && + object.backupWindowStart !== null + ? TimeOfDay.fromPartial(object.backupWindowStart) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromPartial(object.access) + : undefined; + message.zoneId = object.zoneId ?? ""; + message.subnetId = object.subnetId ?? ""; + message.assignPublicIp = object.assignPublicIp ?? false; + return message; + }, +}; + +messageTypeRegistry.set(GreenplumRestoreConfig.$type, GreenplumRestoreConfig); + +const baseRestoreResources: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources", + resourcePresetId: "", + diskSize: 0, +}; + +export const RestoreResources = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources" as const, + + encode( + message: RestoreResources, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourcePresetId !== "") { + writer.uint32(10).string(message.resourcePresetId); + } + if (message.diskSize !== 0) { + writer.uint32(16).int64(message.diskSize); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RestoreResources { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreResources } as RestoreResources; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresetId = reader.string(); + break; + case 2: + message.diskSize = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreResources { + const message = { ...baseRestoreResources } as RestoreResources; + message.resourcePresetId = + object.resourcePresetId !== undefined && object.resourcePresetId !== null + ? String(object.resourcePresetId) + : ""; + message.diskSize = + object.diskSize !== undefined && object.diskSize !== null + ? Number(object.diskSize) + : 0; + return message; + }, + + toJSON(message: RestoreResources): unknown { + const obj: any = {}; + message.resourcePresetId !== undefined && + (obj.resourcePresetId = message.resourcePresetId); + message.diskSize !== undefined && + (obj.diskSize = Math.round(message.diskSize)); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreResources { + const message = { ...baseRestoreResources } as RestoreResources; + message.resourcePresetId = object.resourcePresetId ?? ""; + message.diskSize = object.diskSize ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(RestoreResources.$type, RestoreResources); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts index 0cd0d67d..23cb86f1 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts @@ -17,26 +17,30 @@ import _m0 from "protobufjs/minimal"; import { Cluster_Environment, GreenplumConfig, + GreenplumRestoreConfig, Cluster, cluster_EnvironmentFromJSON, cluster_EnvironmentToJSON, } from "../../../../../yandex/cloud/mdb/greenplum/v1/cluster"; import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/greenplum/v1/maintenance"; -import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { + ConnectionPoolerConfig, Resources, - GreenplumMasterConfig, - GreenplumSegmentConfig, + Greenplumconfig617, + Greenplumconfig619, } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { Host } from "../../../../../yandex/cloud/mdb/greenplum/v1/host"; +import { Backup } from "../../../../../yandex/cloud/mdb/greenplum/v1/backup"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; export interface GetClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.GetClusterRequest"; /** - * ID of the Greenplum Cluster resource to return. + * ID of the Greenplum® Cluster resource to return. * To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; @@ -45,7 +49,7 @@ export interface GetClusterRequest { export interface ListClustersRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClustersRequest"; /** - * ID of the folder to list Greenplum clusters in. + * ID of the folder to list Greenplum® clusters in. * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. 
*/ folderId: string; @@ -85,45 +89,46 @@ export interface ListClustersResponse { export interface CreateClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterRequest"; - /** ID of the folder to create the Greenplum cluster in. */ + /** ID of the folder to create the Greenplum® cluster in. */ folderId: string; - /** Name of the Greenplum cluster. The name must be unique within the folder. */ + /** Name of the Greenplum® cluster. The name must be unique within the folder. Maximum 63 characters. */ name: string; - /** Description of the Greenplum cluster. */ + /** Description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. + * For example, "project":"mvp" or "source":"dictionary". */ labels: { [key: string]: string }; - /** Deployment environment of the Greenplum cluster. */ + /** Deployment environment of the Greenplum® cluster. */ environment: Cluster_Environment; - /** Greenplum cluster config */ + /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Configuration of the Greenplum master subcluster. */ + /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfigSpec; - /** Configuration of the Greenplum segment subcluster. */ + /** Configuration of the Greenplum® segment subcluster. */ segmentConfig?: SegmentSubclusterConfigSpec; - /** Number of hosts of the master subcluster */ + /** Number of hosts in the master subcluster. */ masterHostCount: number; - /** Number of segments in the host */ + /** Number of segments per host. */ segmentInHost: number; - /** Number of hosts of the segment subcluster */ + /** Number of hosts in the segment subcluster. */ segmentHostCount: number; - /** Owner user name */ + /** Owner user name. */ userName: string; - /** Owner user password */ + /** Owner user password. Must be 8-128 characters long */ userPassword: string; /** ID of the network to create the cluster in. */ networkId: string; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Whether or not cluster is protected from being deleted. */ deletionProtection: boolean; - /** Host groups to place VMs of cluster on. */ + /** Host groups to place VMs of the cluster in. */ hostGroupIds: string[]; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; + configSpec?: ConfigSpec; } export interface CreateClusterRequest_LabelsEntry { @@ -132,26 +137,35 @@ export interface CreateClusterRequest_LabelsEntry { value: string; } +/** Configuration of greenplum and odyssey */ +export interface ConfigSpec { + $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec"; + greenplumConfig617?: Greenplumconfig617 | undefined; + greenplumConfig619?: Greenplumconfig619 | undefined; + /** Odyssey pool settings */ + pool?: ConnectionPoolerConfig; +} + export interface CreateClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterMetadata"; - /** ID of the Greenplum cluster that is being created. */ + /** ID of the Greenplum® cluster that is being created. */ clusterId: string; } export interface UpdateClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.UpdateClusterRequest"; /** - * ID of the Greenplum Cluster resource to update. 
- * To get the Greenplum cluster ID, use a [ClusterService.List] request. + * ID of the Greenplum® Cluster resource to update. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the Greenplum Cluster resource should be updated. */ + /** Field mask that specifies which fields of the Greenplum® Cluster resource should be updated. */ updateMask?: FieldMask; - /** New description of the Greenplum cluster. */ + /** New description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. + * For example, "project":"mvp" or "source":"dictionary". * * The new set of labels will completely replace the old ones. To add a label, request the current * set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. @@ -159,17 +173,17 @@ export interface UpdateClusterRequest { labels: { [key: string]: string }; /** New name for the cluster. */ name: string; - /** Greenplum cluster config */ + /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Configuration of the Greenplum master subcluster. */ + /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfigSpec; - /** Configuration of the Greenplum segment subcluster. */ + /** Configuration of the Greenplum® segment subcluster. */ segmentConfig?: SegmentSubclusterConfigSpec; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Whether or not cluster is protected from being deleted. */ deletionProtection: boolean; } @@ -181,52 +195,58 @@ export interface UpdateClusterRequest_LabelsEntry { export interface UpdateClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.UpdateClusterMetadata"; - /** ID of the Greenplum Cluster resource that is being updated. */ + /** ID of the Greenplum® Cluster resource that is being updated. */ clusterId: string; } export interface DeleteClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.DeleteClusterRequest"; /** - * ID of the Greenplum cluster to delete. - * To get the Greenplum cluster ID, use a [ClusterService.List] request. + * ID of the Greenplum® cluster to delete. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. */ clusterId: string; } export interface DeleteClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.DeleteClusterMetadata"; - /** ID of the Greenplum cluster that is being deleted. */ + /** ID of the Greenplum® cluster that is being deleted. */ clusterId: string; } export interface StartClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.StartClusterRequest"; - /** ID of the Greenplum cluster to start. */ + /** + * ID of the Greenplum® cluster to start. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. + */ clusterId: string; } export interface StartClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.StartClusterMetadata"; - /** ID of the Greenplum cluster being started. */ + /** ID of the Greenplum® cluster being started. 
*/ clusterId: string; } export interface StopClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.StopClusterRequest"; - /** ID of the Greenplum cluster to stop. */ + /** + * ID of the Greenplum® cluster to stop. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. + */ clusterId: string; } export interface StopClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.StopClusterMetadata"; - /** ID of the Greenplum cluster being stopped. */ + /** ID of the Greenplum® cluster being stopped. */ clusterId: string; } export interface ListClusterOperationsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterOperationsRequest"; - /** ID of the Greenplum Cluster resource to list operations for. */ + /** ID of the Greenplum® Cluster resource to list operations for. */ clusterId: string; /** * The maximum number of results per page to return. If the number of available @@ -235,7 +255,7 @@ export interface ListClusterOperationsRequest { */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] + * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] * returned by a previous list request. */ pageToken: string; @@ -243,7 +263,7 @@ export interface ListClusterOperationsRequest { export interface ListClusterOperationsResponse { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterOperationsResponse"; - /** List of Operation resources for the specified Greenplum cluster. */ + /** List of Operation resources for the specified Greenplum® cluster. */ operations: Operation[]; /** * This token allows you to get the next page of results for list requests. If the number of results @@ -257,8 +277,8 @@ export interface ListClusterOperationsResponse { export interface ListClusterHostsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterHostsRequest"; /** - * ID of the Greenplum cluster. - * To get the Greenplum cluster ID use a [ClusterService.List] request. + * ID of the Greenplum® cluster. + * To get the Greenplum® cluster ID use a [ClusterService.List] request. */ clusterId: string; /** @@ -268,7 +288,7 @@ export interface ListClusterHostsRequest { */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] + * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] * returned by a previous list request. */ pageToken: string; @@ -287,22 +307,224 @@ export interface ListClusterHostsResponse { nextPageToken: string; } -/** Configuration of master subcluster */ +/** Configuration of the master subcluster. */ export interface MasterSubclusterConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfigSpec"; - /** Resources allocated to Greenplum master subcluster hosts. */ + /** Resources allocated to Greenplum® master subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum master server. */ - config?: GreenplumMasterConfig; } -/** Configuration of segmet subcluster */ +/** Configuration of the segment subcluster. */ export interface SegmentSubclusterConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfigSpec"; - /** Resources allocated to Greenplum segment subcluster hosts. */ + /** Resources allocated to Greenplum® segment subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum segment server. 
*/ - config?: GreenplumSegmentConfig; +} + +export interface ListClusterLogsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsResponse"; + /** Requested log records. */ + logs: LogRecord[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value + * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * This value is interchangeable with the [StreamLogRecord.next_record_token] from StreamLogs method. + */ + nextPageToken: string; +} + +export interface LogRecord { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord"; + /** Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + timestamp?: Date; + /** Contents of the log record. */ + message: { [key: string]: string }; +} + +export interface LogRecord_MessageEntry { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry"; + key: string; + value: string; +} + +export interface ListClusterLogsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest"; + /** + * ID of the Greenplum® cluster to request logs for. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** + * Columns from logs table to request. + * If no columns are specified, entire log records are returned. + */ + columnFilter: string[]; + /** Type of the service to request logs about. */ + serviceType: ListClusterLogsRequest_ServiceType; + /** Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + fromTime?: Date; + /** End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + toTime?: Date; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] + * returned by a previous list request. + */ + pageToken: string; + /** Always return `next_page_token`, even if the current page is empty. */ + alwaysNextPageToken: boolean; + /** + * A filter expression that filters resources listed in the response. + * The expression must specify: + * 1. The field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], + * [LogRecord.logs.message.error_severity] (for `GREENPLUM` service) and [LogRecord.logs.message.level] (for `GREENPLUM_POOLER` service) fields. + * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * 3. The value. Must be 1-63 characters long and match the regular expression `^[a-z0-9.-]{1,61}$`. + * Examples of a filter: + * * `message.hostname='node1.db.cloud.yandex.net'` + * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"` + */ + filter: string; +} + +/** Type of the service to request logs about. */ +export enum ListClusterLogsRequest_ServiceType { + /** SERVICE_TYPE_UNSPECIFIED - Type is not specified. 
*/ + SERVICE_TYPE_UNSPECIFIED = 0, + /** GREENPLUM - Greenplum® activity logs. */ + GREENPLUM = 1, + /** GREENPLUM_POOLER - Greenplum® pooler logs. */ + GREENPLUM_POOLER = 2, + UNRECOGNIZED = -1, +} + +export function listClusterLogsRequest_ServiceTypeFromJSON( + object: any +): ListClusterLogsRequest_ServiceType { + switch (object) { + case 0: + case "SERVICE_TYPE_UNSPECIFIED": + return ListClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED; + case 1: + case "GREENPLUM": + return ListClusterLogsRequest_ServiceType.GREENPLUM; + case 2: + case "GREENPLUM_POOLER": + return ListClusterLogsRequest_ServiceType.GREENPLUM_POOLER; + case -1: + case "UNRECOGNIZED": + default: + return ListClusterLogsRequest_ServiceType.UNRECOGNIZED; + } +} + +export function listClusterLogsRequest_ServiceTypeToJSON( + object: ListClusterLogsRequest_ServiceType +): string { + switch (object) { + case ListClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED: + return "SERVICE_TYPE_UNSPECIFIED"; + case ListClusterLogsRequest_ServiceType.GREENPLUM: + return "GREENPLUM"; + case ListClusterLogsRequest_ServiceType.GREENPLUM_POOLER: + return "GREENPLUM_POOLER"; + default: + return "UNKNOWN"; + } +} + +export interface ListClusterBackupsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest"; + /** + * ID of the Greenplum® cluster. + * To get the Greenplum® cluster ID use a [ClusterService.List] request. + */ + clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] + * returned by a previous list request. + */ + pageToken: string; +} + +export interface ListClusterBackupsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse"; + /** List of Greenplum® backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value + * for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface RestoreClusterRequest { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest"; + /** + * ID of the backup to create a cluster from. + * To get the backup ID, use a [ClusterService.ListBackups] request. + */ + backupId: string; + /** ID of the folder to create the Greenplum® cluster in. */ + folderId: string; + /** Name of the Greenplum® cluster. The name must be unique within the folder. */ + name: string; + /** Description of the Greenplum® cluster. */ + description: string; + /** + * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. + * For example, "project": "mvp" or "source": "dictionary". + */ + labels: { [key: string]: string }; + /** Deployment environment of the Greenplum® cluster. */ + environment: Cluster_Environment; + /** Greenplum® cluster config */ + config?: GreenplumRestoreConfig; + /** Resources of the Greenplum® master subcluster. 
*/ + masterResources?: Resources; + /** Resources of the Greenplum® segment subcluster. */ + segmentResources?: Resources; + /** ID of the network to create the cluster in. */ + networkId: string; + /** User security groups */ + securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** Host groups to place VMs of cluster on. */ + hostGroupIds: string[]; + /** ID of placement group */ + placementGroupId: string; + /** Window of maintenance operations. */ + maintenanceWindow?: MaintenanceWindow; +} + +export interface RestoreClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface RestoreClusterMetadata { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterMetadata"; + /** ID of the new Greenplum® cluster that is being created from a backup. */ + clusterId: string; + /** ID of the backup that is being used for creating a cluster. */ + backupId: string; } const baseGetClusterRequest: object = { @@ -649,6 +871,9 @@ export const CreateClusterRequest = { writer.uint32(154).fork() ).ldelim(); } + if (message.configSpec !== undefined) { + ConfigSpec.encode(message.configSpec, writer.uint32(162).fork()).ldelim(); + } return writer; }, @@ -734,6 +959,9 @@ export const CreateClusterRequest = { reader.uint32() ); break; + case 20: + message.configSpec = ConfigSpec.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -818,6 +1046,10 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromJSON(object.maintenanceWindow) : undefined; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromJSON(object.configSpec) + : undefined; return message; }, @@ -873,6 +1105,10 @@ export const CreateClusterRequest = { (obj.maintenanceWindow = message.maintenanceWindow ? MaintenanceWindow.toJSON(message.maintenanceWindow) : undefined); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? ConfigSpec.toJSON(message.configSpec) + : undefined); return obj; }, @@ -918,6 +1154,10 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromPartial(object.maintenanceWindow) : undefined; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromPartial(object.configSpec) + : undefined; return message; }, }; @@ -1010,6 +1250,128 @@ messageTypeRegistry.set( CreateClusterRequest_LabelsEntry ); +const baseConfigSpec: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec", +}; + +export const ConfigSpec = { + $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec" as const, + + encode( + message: ConfigSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.greenplumConfig617 !== undefined) { + Greenplumconfig617.encode( + message.greenplumConfig617, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.greenplumConfig619 !== undefined) { + Greenplumconfig619.encode( + message.greenplumConfig619, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.pool !== undefined) { + ConnectionPoolerConfig.encode( + message.pool, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConfigSpec } as ConfigSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.greenplumConfig617 = Greenplumconfig617.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.greenplumConfig619 = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.pool = ConnectionPoolerConfig.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConfigSpec { + const message = { ...baseConfigSpec } as ConfigSpec; + message.greenplumConfig617 = + object.greenplumConfig_6_17 !== undefined && + object.greenplumConfig_6_17 !== null + ? Greenplumconfig617.fromJSON(object.greenplumConfig_6_17) + : undefined; + message.greenplumConfig619 = + object.greenplumConfig_6_19 !== undefined && + object.greenplumConfig_6_19 !== null + ? Greenplumconfig619.fromJSON(object.greenplumConfig_6_19) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfig.fromJSON(object.pool) + : undefined; + return message; + }, + + toJSON(message: ConfigSpec): unknown { + const obj: any = {}; + message.greenplumConfig617 !== undefined && + (obj.greenplumConfig_6_17 = message.greenplumConfig617 + ? Greenplumconfig617.toJSON(message.greenplumConfig617) + : undefined); + message.greenplumConfig619 !== undefined && + (obj.greenplumConfig_6_19 = message.greenplumConfig619 + ? Greenplumconfig619.toJSON(message.greenplumConfig619) + : undefined); + message.pool !== undefined && + (obj.pool = message.pool + ? ConnectionPoolerConfig.toJSON(message.pool) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ConfigSpec { + const message = { ...baseConfigSpec } as ConfigSpec; + message.greenplumConfig617 = + object.greenplumConfig617 !== undefined && + object.greenplumConfig617 !== null + ? Greenplumconfig617.fromPartial(object.greenplumConfig617) + : undefined; + message.greenplumConfig619 = + object.greenplumConfig619 !== undefined && + object.greenplumConfig619 !== null + ? Greenplumconfig619.fromPartial(object.greenplumConfig619) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfig.fromPartial(object.pool) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ConfigSpec.$type, ConfigSpec); + const baseCreateClusterMetadata: object = { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterMetadata", clusterId: "", @@ -2278,12 +2640,6 @@ export const MasterSubclusterConfigSpec = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumMasterConfig.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -2302,12 +2658,6 @@ export const MasterSubclusterConfigSpec = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumMasterConfig.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -2324,10 +2674,6 @@ export const MasterSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? 
GreenplumMasterConfig.fromJSON(object.config) - : undefined; return message; }, @@ -2337,10 +2683,6 @@ export const MasterSubclusterConfigSpec = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumMasterConfig.toJSON(message.config) - : undefined); return obj; }, @@ -2354,10 +2696,6 @@ export const MasterSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumMasterConfig.fromPartial(object.config) - : undefined; return message; }, }; @@ -2381,12 +2719,6 @@ export const SegmentSubclusterConfigSpec = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumSegmentConfig.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -2405,12 +2737,6 @@ export const SegmentSubclusterConfigSpec = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumSegmentConfig.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -2427,10 +2753,6 @@ export const SegmentSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfig.fromJSON(object.config) - : undefined; return message; }, @@ -2440,10 +2762,6 @@ export const SegmentSubclusterConfigSpec = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumSegmentConfig.toJSON(message.config) - : undefined); return obj; }, @@ -2457,10 +2775,6 @@ export const SegmentSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfig.fromPartial(object.config) - : undefined; return message; }, }; @@ -2470,43 +2784,1180 @@ messageTypeRegistry.set( SegmentSubclusterConfigSpec ); -/** A set of methods for managing Greenplum clusters. */ -export const ClusterServiceService = { - /** - * Returns the specified Greenplum cluster. - * - * To get the list of available Greenplum clusters, make a [List] request. - */ - get: { - path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Get", - requestStream: false, - responseStream: false, - requestSerialize: (value: GetClusterRequest) => - Buffer.from(GetClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), - responseSerialize: (value: Cluster) => - Buffer.from(Cluster.encode(value).finish()), - responseDeserialize: (value: Buffer) => Cluster.decode(value), - }, - /** - * Retrieves a list of Greenplum clusters that belong - * to the specified folder. 
- */ - list: { - path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/List", - requestStream: false, - responseStream: false, - requestSerialize: (value: ListClustersRequest) => - Buffer.from(ListClustersRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), - responseSerialize: (value: ListClustersResponse) => - Buffer.from(ListClustersResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), +const baseListClusterLogsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsResponse", + nextPageToken: "", +}; + +export const ListClusterLogsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsResponse" as const, + + encode( + message: ListClusterLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.logs) { + LogRecord.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; }, - /** Creates a Greenplum cluster in the specified folder. */ - create: { - path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Create", - requestStream: false, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.logs.push(LogRecord.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterLogsResponse { + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = (object.logs ?? []).map((e: any) => LogRecord.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterLogsResponse): unknown { + const obj: any = {}; + if (message.logs) { + obj.logs = message.logs.map((e) => (e ? LogRecord.toJSON(e) : undefined)); + } else { + obj.logs = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterLogsResponse { + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = object.logs?.map((e) => LogRecord.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterLogsResponse.$type, ListClusterLogsResponse); + +const baseLogRecord: object = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord", +}; + +export const LogRecord = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord" as const, + + encode( + message: LogRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timestamp !== undefined) { + Timestamp.encode( + toTimestamp(message.timestamp), + writer.uint32(10).fork() + ).ldelim(); + } + Object.entries(message.message).forEach(([key, value]) => { + LogRecord_MessageEntry.encode( + { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry", + key: key as any, + value, + }, + writer.uint32(18).fork() + ).ldelim(); + }); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLogRecord } as LogRecord; + message.message = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timestamp = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + const entry2 = LogRecord_MessageEntry.decode(reader, reader.uint32()); + if (entry2.value !== undefined) { + message.message[entry2.key] = entry2.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogRecord { + const message = { ...baseLogRecord } as LogRecord; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? fromJsonTimestamp(object.timestamp) + : undefined; + message.message = Object.entries(object.message ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: LogRecord): unknown { + const obj: any = {}; + message.timestamp !== undefined && + (obj.timestamp = message.timestamp.toISOString()); + obj.message = {}; + if (message.message) { + Object.entries(message.message).forEach(([k, v]) => { + obj.message[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LogRecord { + const message = { ...baseLogRecord } as LogRecord; + message.timestamp = object.timestamp ?? undefined; + message.message = Object.entries(object.message ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(LogRecord.$type, LogRecord); + +const baseLogRecord_MessageEntry: object = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry", + key: "", + value: "", +}; + +export const LogRecord_MessageEntry = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry" as const, + + encode( + message: LogRecord_MessageEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LogRecord_MessageEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogRecord_MessageEntry { + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: LogRecord_MessageEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogRecord_MessageEntry { + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(LogRecord_MessageEntry.$type, LogRecord_MessageEntry); + +const baseListClusterLogsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest", + clusterId: "", + columnFilter: "", + serviceType: 0, + pageSize: 0, + pageToken: "", + alwaysNextPageToken: false, + filter: "", +}; + +export const ListClusterLogsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest" as const, + + encode( + message: ListClusterLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.columnFilter) { + writer.uint32(18).string(v!); + } + if (message.serviceType !== 0) { + writer.uint32(24).int32(message.serviceType); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.pageSize !== 0) { + writer.uint32(48).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(58).string(message.pageToken); + } + if (message.alwaysNextPageToken === true) { + writer.uint32(64).bool(message.alwaysNextPageToken); + } + if (message.filter !== "") { + writer.uint32(74).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.columnFilter = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.columnFilter.push(reader.string()); + break; + case 3: + message.serviceType = reader.int32() as any; + break; + case 4: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 7: + message.pageToken = reader.string(); + break; + case 8: + message.alwaysNextPageToken = reader.bool(); + break; + case 9: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterLogsRequest { + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.columnFilter = (object.columnFilter ?? []).map((e: any) => + String(e) + ); + message.serviceType = + object.serviceType !== undefined && object.serviceType !== null + ? listClusterLogsRequest_ServiceTypeFromJSON(object.serviceType) + : 0; + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.alwaysNextPageToken = + object.alwaysNextPageToken !== undefined && + object.alwaysNextPageToken !== null + ? Boolean(object.alwaysNextPageToken) + : false; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListClusterLogsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.columnFilter) { + obj.columnFilter = message.columnFilter.map((e) => e); + } else { + obj.columnFilter = []; + } + message.serviceType !== undefined && + (obj.serviceType = listClusterLogsRequest_ServiceTypeToJSON( + message.serviceType + )); + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.alwaysNextPageToken !== undefined && + (obj.alwaysNextPageToken = message.alwaysNextPageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterLogsRequest { + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.clusterId = object.clusterId ?? ""; + message.columnFilter = object.columnFilter?.map((e) => e) || []; + message.serviceType = object.serviceType ?? 0; + message.fromTime = object.fromTime ?? 
undefined; + message.toTime = object.toTime ?? undefined; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.alwaysNextPageToken = object.alwaysNextPageToken ?? false; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterLogsRequest.$type, ListClusterLogsRequest); + +const baseListClusterBackupsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterBackupsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest" as const, + + encode( + message: ListClusterBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
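The new log-listing messages carry google.protobuf.Timestamp fields that surface as plain JavaScript Date values; the toTimestamp()/fromTimestamp() helpers defined later in this file convert them during encode()/decode(), and longToNumber() maps int64 fields such as page_size to JS numbers. A small wire round-trip sketch (values are illustrative; the import path is assumed):

import { ListClusterLogsRequest } from "./cluster_service"; // assumed path to this generated module

const request = ListClusterLogsRequest.fromPartial({
  clusterId: "hypothetical-cluster-id",
  columnFilter: ["message"],
  fromTime: new Date("2022-02-01T00:00:00Z"), // encoded as google.protobuf.Timestamp
  pageSize: 100,
});

const bytes = ListClusterLogsRequest.encode(request).finish(); // Uint8Array
const restored = ListClusterLogsRequest.decode(bytes);

// fromTime is a Date again after decoding; int64 values above
// Number.MAX_SAFE_INTEGER would make longToNumber() throw.
console.log(restored.fromTime?.toISOString(), restored.pageSize);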
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsRequest.$type, + ListClusterBackupsRequest +); + +const baseListClusterBackupsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse", + nextPageToken: "", +}; + +export const ListClusterBackupsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse" as const, + + encode( + message: ListClusterBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsResponse.$type, + ListClusterBackupsResponse +); + +const baseRestoreClusterRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest", + backupId: "", + folderId: "", + name: "", + description: "", + environment: 0, + networkId: "", + securityGroupIds: "", + deletionProtection: false, + hostGroupIds: "", + placementGroupId: "", +}; + +export const RestoreClusterRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest" as const, + + encode( + message: RestoreClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + RestoreClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(48).int32(message.environment); + } + if (message.config !== undefined) { + GreenplumRestoreConfig.encode( + message.config, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.masterResources !== undefined) { + Resources.encode( + message.masterResources, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.segmentResources !== undefined) { + Resources.encode( + message.segmentResources, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(82).string(message.networkId); + } + for (const v of message.securityGroupIds) { + writer.uint32(90).string(v!); + } + if (message.deletionProtection === true) { + writer.uint32(96).bool(message.deletionProtection); + } + for (const v of message.hostGroupIds) { + writer.uint32(106).string(v!); + } + if (message.placementGroupId !== "") { + writer.uint32(114).string(message.placementGroupId); + } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(122).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.labels = {}; + message.securityGroupIds = []; + message.hostGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = RestoreClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.environment = reader.int32() as any; + break; + case 7: + message.config = GreenplumRestoreConfig.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.masterResources = Resources.decode(reader, reader.uint32()); + break; + case 9: + message.segmentResources = Resources.decode(reader, reader.uint32()); + break; + case 10: + message.networkId = reader.string(); + break; + case 11: + message.securityGroupIds.push(reader.string()); + break; + case 12: + message.deletionProtection = reader.bool(); + break; + case 13: + message.hostGroupIds.push(reader.string()); + break; + case 14: + message.placementGroupId = reader.string(); + break; + case 15: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.config = + object.config !== undefined && object.config !== null + ? GreenplumRestoreConfig.fromJSON(object.config) + : undefined; + message.masterResources = + object.masterResources !== undefined && object.masterResources !== null + ? Resources.fromJSON(object.masterResources) + : undefined; + message.segmentResources = + object.segmentResources !== undefined && object.segmentResources !== null + ? Resources.fromJSON(object.segmentResources) + : undefined; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); + message.placementGroupId = + object.placementGroupId !== undefined && object.placementGroupId !== null + ? 
String(object.placementGroupId) + : ""; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; + return message; + }, + + toJSON(message: RestoreClusterRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + message.config !== undefined && + (obj.config = message.config + ? GreenplumRestoreConfig.toJSON(message.config) + : undefined); + message.masterResources !== undefined && + (obj.masterResources = message.masterResources + ? Resources.toJSON(message.masterResources) + : undefined); + message.segmentResources !== undefined && + (obj.segmentResources = message.segmentResources + ? Resources.toJSON(message.segmentResources) + : undefined); + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } + message.placementGroupId !== undefined && + (obj.placementGroupId = message.placementGroupId); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = object.backupId ?? ""; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.config = + object.config !== undefined && object.config !== null + ? GreenplumRestoreConfig.fromPartial(object.config) + : undefined; + message.masterResources = + object.masterResources !== undefined && object.masterResources !== null + ? Resources.fromPartial(object.masterResources) + : undefined; + message.segmentResources = + object.segmentResources !== undefined && object.segmentResources !== null + ? Resources.fromPartial(object.segmentResources) + : undefined; + message.networkId = object.networkId ?? ""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.placementGroupId = object.placementGroupId ?? ""; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? 
MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterRequest.$type, RestoreClusterRequest); + +const baseRestoreClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const RestoreClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry" as const, + + encode( + message: RestoreClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: RestoreClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + RestoreClusterRequest_LabelsEntry.$type, + RestoreClusterRequest_LabelsEntry +); + +const baseRestoreClusterMetadata: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterMetadata", + clusterId: "", + backupId: "", +}; + +export const RestoreClusterMetadata = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterMetadata" as const, + + encode( + message: RestoreClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: RestoreClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterMetadata.$type, RestoreClusterMetadata); + +/** A set of methods for managing Greenplum® clusters. */ +export const ClusterServiceService = { + /** + * Returns the specified Greenplum® cluster. + * + * To get the list of available Greenplum® clusters, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetClusterRequest) => + Buffer.from(GetClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), + responseSerialize: (value: Cluster) => + Buffer.from(Cluster.encode(value).finish()), + responseDeserialize: (value: Buffer) => Cluster.decode(value), + }, + /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ + list: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClustersRequest) => + Buffer.from(ListClustersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), + responseSerialize: (value: ListClustersResponse) => + Buffer.from(ListClustersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), + }, + /** Creates a Greenplum® cluster in the specified folder. */ + create: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Create", + requestStream: false, responseStream: false, requestSerialize: (value: CreateClusterRequest) => Buffer.from(CreateClusterRequest.encode(value).finish()), @@ -2515,7 +3966,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates the specified Greenplum cluster. */ + /** Updates the specified Greenplum® cluster. */ update: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Update", requestStream: false, @@ -2527,7 +3978,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified Greenplum cluster. 
*/ + /** Deletes the specified Greenplum® cluster. */ delete: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Delete", requestStream: false, @@ -2539,7 +3990,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Starts the specified Greenplum cluster. */ + /** Starts the specified Greenplum® cluster. */ start: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Start", requestStream: false, @@ -2551,7 +4002,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Stops the specified Greenplum cluster. */ + /** Stops the specified Greenplum® cluster. */ stop: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Stop", requestStream: false, @@ -2605,29 +4056,65 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterHostsResponse.decode(value), }, + /** Retrieves logs for the specified Greenplum® cluster. */ + listLogs: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/ListLogs", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterLogsRequest) => + Buffer.from(ListClusterLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClusterLogsRequest.decode(value), + responseSerialize: (value: ListClusterLogsResponse) => + Buffer.from(ListClusterLogsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterLogsResponse.decode(value), + }, + /** Retrieves the list of available backups for the specified Greenplum cluster. */ + listBackups: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/ListBackups", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterBackupsRequest) => + Buffer.from(ListClusterBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterBackupsRequest.decode(value), + responseSerialize: (value: ListClusterBackupsResponse) => + Buffer.from(ListClusterBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterBackupsResponse.decode(value), + }, + /** Creates a new Greenplum® cluster using the specified backup. */ + restore: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Restore", + requestStream: false, + responseStream: false, + requestSerialize: (value: RestoreClusterRequest) => + Buffer.from(RestoreClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RestoreClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface ClusterServiceServer extends UntypedServiceImplementation { /** - * Returns the specified Greenplum cluster. + * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum clusters, make a [List] request. + * To get the list of available Greenplum® clusters, make a [List] request. */ get: handleUnaryCall; - /** - * Retrieves a list of Greenplum clusters that belong - * to the specified folder. - */ + /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ list: handleUnaryCall; - /** Creates a Greenplum cluster in the specified folder. */ + /** Creates a Greenplum® cluster in the specified folder. 
*/ create: handleUnaryCall; - /** Updates the specified Greenplum cluster. */ + /** Updates the specified Greenplum® cluster. */ update: handleUnaryCall; - /** Deletes the specified Greenplum cluster. */ + /** Deletes the specified Greenplum® cluster. */ delete: handleUnaryCall; - /** Starts the specified Greenplum cluster. */ + /** Starts the specified Greenplum® cluster. */ start: handleUnaryCall; - /** Stops the specified Greenplum cluster. */ + /** Stops the specified Greenplum® cluster. */ stop: handleUnaryCall; /** Retrieves the list of Operation resources for the specified cluster. */ listOperations: handleUnaryCall< @@ -2644,13 +4131,22 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { ListClusterHostsRequest, ListClusterHostsResponse >; + /** Retrieves logs for the specified Greenplum® cluster. */ + listLogs: handleUnaryCall; + /** Retrieves the list of available backups for the specified Greenplum cluster. */ + listBackups: handleUnaryCall< + ListClusterBackupsRequest, + ListClusterBackupsResponse + >; + /** Creates a new Greenplum® cluster using the specified backup. */ + restore: handleUnaryCall; } export interface ClusterServiceClient extends Client { /** - * Returns the specified Greenplum cluster. + * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum clusters, make a [List] request. + * To get the list of available Greenplum® clusters, make a [List] request. */ get( request: GetClusterRequest, @@ -2667,10 +4163,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Cluster) => void ): ClientUnaryCall; - /** - * Retrieves a list of Greenplum clusters that belong - * to the specified folder. - */ + /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ list( request: ListClustersRequest, callback: ( @@ -2695,7 +4188,7 @@ export interface ClusterServiceClient extends Client { response: ListClustersResponse ) => void ): ClientUnaryCall; - /** Creates a Greenplum cluster in the specified folder. */ + /** Creates a Greenplum® cluster in the specified folder. */ create( request: CreateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2711,7 +4204,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates the specified Greenplum cluster. */ + /** Updates the specified Greenplum® cluster. */ update( request: UpdateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2727,7 +4220,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified Greenplum cluster. */ + /** Deletes the specified Greenplum® cluster. */ delete( request: DeleteClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2743,7 +4236,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Starts the specified Greenplum cluster. */ + /** Starts the specified Greenplum® cluster. 
*/ start( request: StartClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2759,7 +4252,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Stops the specified Greenplum cluster. */ + /** Stops the specified Greenplum® cluster. */ stop( request: StopClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2850,6 +4343,72 @@ export interface ClusterServiceClient extends Client { response: ListClusterHostsResponse ) => void ): ClientUnaryCall; + /** Retrieves logs for the specified Greenplum® cluster. */ + listLogs( + request: ListClusterLogsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + listLogs( + request: ListClusterLogsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + listLogs( + request: ListClusterLogsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + /** Retrieves the list of available backups for the specified Greenplum cluster. */ + listBackups( + request: ListClusterBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + /** Creates a new Greenplum® cluster using the specified backup. 
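One way to exercise the new Restore RPC directly over @grpc/grpc-js using the client constructor exported below (a sketch: the endpoint, the IAM-token auth header and all IDs are assumptions; in the SDK these are normally wired up by the Session and service-factory helpers rather than by hand):

import { credentials, Metadata } from "@grpc/grpc-js";
import { ClusterServiceClient, RestoreClusterRequest } from "./cluster_service"; // assumed path

const client = new ClusterServiceClient(
  "mdb.api.cloud.yandex.net:443", // assumed endpoint; the SDK's endpoint list is authoritative
  credentials.createSsl()
);

const metadata = new Metadata();
metadata.set("authorization", `Bearer ${process.env.YC_IAM_TOKEN ?? ""}`); // assumed auth scheme

client.restore(
  RestoreClusterRequest.fromPartial({
    backupId: "hypothetical-backup-id",
    folderId: "hypothetical-folder-id",
    name: "restored-gp-cluster",
    networkId: "hypothetical-network-id",
  }),
  metadata,
  (err, operation) => {
    if (err) throw err;
    // restore() returns a long-running Operation; poll it to completion elsewhere.
    console.log("operation id:", operation.id);
  }
);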
*/ + restore( + request: RestoreClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const ClusterServiceClient = makeGenericClientConstructor( @@ -2902,6 +4461,28 @@ export type Exact = P extends Builtin never >; +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + function longToNumber(long: Long): number { if (long.gt(Number.MAX_SAFE_INTEGER)) { throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts index f938d1cb..8a25ddfc 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts @@ -2,61 +2,47 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; -import { - Int64Value, - StringValue, - BoolValue, - FloatValue, -} from "../../../../../google/protobuf/wrappers"; +import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; +/** A list of computational resources allocated to a host. */ export interface Resources { $type: "yandex.cloud.mdb.greenplum.v1.Resources"; /** - * ID of the preset for computational resources available to a host (CPU, memory etc.). - * All available presets are listed in the [documentation](/docs/managed-greenplum/concepts/instance-types). + * ID of the preset for computational resources allocated to a host. + * Available presets are listed in the [documentation](/docs/managed-greenplum/concepts/instance-types). */ resourcePresetId: string; - /** Volume of the storage available to a host. */ + /** Volume of the storage used by the host, in bytes. */ diskSize: number; - /** - * Type of the storage environment for the host. - * - * Possible values: - * * network-hdd - network HDD drive, - * * network-ssd - network SSD drive, - * * local-ssd - local SSD storage. - */ + /** Type of the storage used by the host: `network-hdd`, `network-ssd` or `local-ssd`. */ diskTypeId: string; } +/** Route server configuration. */ export interface ConnectionPoolerConfig { $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig"; - /** - * Odyssey route server pool mode. Default is session mode. - * https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string - */ + /** Route server pool mode. */ mode: ConnectionPoolerConfig_PoolMode; /** - * Odyssey Server pool size. - * Keep the number of servers in the pool as much as 'pool_size'. 
Clients are put in a wait queue, when all servers are busy. + * The number of servers in the server pool. Clients are placed in a wait queue when all servers are busy. * Set to zero to disable the limit. - * https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_size-integer */ size?: number; /** - * Server pool idle timeout. - * Close an server connection when it becomes idle for 'pool_ttl' seconds. - * Set to zero to disable. - * https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_ttl-integer + * Server pool idle timeout, in seconds. A server connection closes after it has been idle for the specified duration. + * Set to zero to disable the limit. */ clientIdleTimeout?: number; } +/** Route server pool mode. */ export enum ConnectionPoolerConfig_PoolMode { POOL_MODE_UNSPECIFIED = 0, + /** SESSION - Assign server connection to a client until it disconnects. Default value. */ SESSION = 1, + /** TRANSACTION - Assign server connection to a client for a transaction processing. */ TRANSACTION = 2, UNRECOGNIZED = -1, } @@ -96,296 +82,65 @@ export function connectionPoolerConfig_PoolModeToJSON( } } -/** Configuration of master subcluster */ +/** Configuration of the master subcluster. */ export interface MasterSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig"; - /** Resources allocated to Greenplum master subcluster hosts. */ + /** Computational resources allocated to Greenplum® master subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum master server. */ - config?: GreenplumMasterConfigSet; } -/** Configuration of segmet subcluster */ +/** Configuration of the segment subcluster. */ export interface SegmentSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig"; - /** Resources allocated to Greenplum segment subcluster hosts. */ + /** Computational resources allocated to Greenplum® segment subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum segment server. */ - config?: GreenplumSegmentConfigSet; } -/** - * Greenplum master subcluster configuration options. Detailed description for each set of options - * - * Any options not listed here are not supported. - */ -export interface GreenplumMasterConfig { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfig"; - /** Logging level for the Greenplum master subcluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR. */ - logLevel: GreenplumMasterConfig_LogLevel; - /** Maximum number of inbound connections. */ +export interface Greenplumconfig617 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17"; + /** Maximum number of inbound connections on master segment */ maxConnections?: number; - /** The server's time zone to be used in DateTime fields conversions. Specified as an IANA identifier. */ - timezone?: string; - /** Odyssey pool settings */ - pool?: ConnectionPoolerConfig; - /** - * Sets the maximum number of transactions that can be in the "prepared" state simultaneously - * https://www.postgresql.org/docs/9.6/runtime-config-resource.html - */ - maxPreparedTransactions?: number; - /** - * For queries that are managed by resource queues or resource groups, - * this parameter determines when Greenplum Database terminates running queries based on the amount of memory the queries are using. - * A value of 100 disables the automatic termination of queries based on the percentage of memory that is utilized. 
- * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#runaway_detector_activation_percent - */ - runawayDetectorActivationPercent?: number; - /** - * How many keepalives may be lost before the connection is considered dead. A value of 0 uses the system default. - * If TCP_KEEPCNT is not supported, this parameter must be 0. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#tcp_keepalives_count - */ - tcpKeepalivesCount?: number; - /** - * How many seconds to wait for a response to a keepalive before retransmitting. A value of 0 uses the system default. - * If TCP_KEEPINTVL is not supported, this parameter must be 0. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#tcp_keepalives_interval - */ - tcpKeepalivesInterval?: number; - /** - * When an SQL query reads from an external table, the parameter value specifies the amount of time in seconds that - * Greenplum Database waits before cancelling the query when data stops being returned from the external table. - * The default value of 0, specifies no time out. Greenplum Database does not cancel the query. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#readable_external_table_timeout - */ - readableExternalTableTimeout?: number; - /** - * Sets the amount of data per-peer to be queued by the default UDPIFC interconnect on senders. - * Increasing the depth from its default value will cause the system to use more memory, but may increase performance. - * Reasonable values for this parameter are between 1 and 4. Increasing the value might radically increase the amount of memory used by the system. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_interconnect_snd_queue_depth - */ - gpInterconnectSndQueueDepth?: number; - /** - * Sets the amount of data per-peer to be queued by the Greenplum Database interconnect on receivers - * (when data is received but no space is available to receive it the data will be dropped, and the transmitter will need to resend it) - * for the default UDPIFC interconnect. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_interconnect_queue_depth - */ - gpInterconnectQueueDepth?: number; /** - * Controls which SQL statements are logged. DDL logs all data definition commands like CREATE, ALTER, and DROP commands. - * MOD logs all DDL statements, plus INSERT, UPDATE, DELETE, TRUNCATE, and COPY FROM. - * PREPARE and EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_statement - * Default value is ddl + * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. + * https://www.postgresql.org/docs/current/runtime-config-replication.html */ - logStatement: GreenplumMasterConfig_LogStatement; + maxSlotWalKeepSize?: number; /** - * Causes the duration of every completed statement which satisfies log_statement to be logged. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_duration + * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. 
+ * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment */ - logDuration?: boolean; + gpWorkfileLimitPerSegment?: number; /** - * For a partitioned table, controls whether the ROOTPARTITION keyword is required to collect root partition statistics - * when the ANALYZE command is run on the table. GPORCA uses the root partition statistics when generating a query plan. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#optimizer_analyze_root_partition + * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query */ - optimizerAnalyzeRootPartition?: boolean; + gpWorkfileLimitPerQuery?: number; /** - * Sets the number of segments that will scan external table data during an external table operation, - * the purpose being not to overload the system with scanning data and take away resources from other concurrent operations. - * This only applies to external tables that use the gpfdist:// protocol to access external table data. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_external_max_segs + * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. + * Spill files are created when executing a query that requires more memory than it is allocated. + * The current query is terminated when the limit is exceeded. + * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query + * Default value is 10000 */ - gpExternalMaxSegs?: number; + gpWorkfileLimitFilesPerQuery?: number; /** - * Specifies the allowed timeout for the fault detection process (ftsprobe) to establish a connection to a segment before declaring it down. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_fts_probe_timeout + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html */ - gpFtsProbeTimeout?: number; + maxPreparedTransactions?: number; /** * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. 
* https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression */ gpWorkfileCompression?: boolean; - /** https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_autostats_mode_in_functions */ - gpAutostatsModeInFunctions: GreenplumMasterConfig_AutostatsModeInFunctions; -} - -export enum GreenplumMasterConfig_LogLevel { - LOG_LEVEL_UNSPECIFIED = 0, - TRACE = 1, - DEBUG = 2, - INFORMATION = 3, - WARNING = 4, - ERROR = 5, - UNRECOGNIZED = -1, -} - -export function greenplumMasterConfig_LogLevelFromJSON( - object: any -): GreenplumMasterConfig_LogLevel { - switch (object) { - case 0: - case "LOG_LEVEL_UNSPECIFIED": - return GreenplumMasterConfig_LogLevel.LOG_LEVEL_UNSPECIFIED; - case 1: - case "TRACE": - return GreenplumMasterConfig_LogLevel.TRACE; - case 2: - case "DEBUG": - return GreenplumMasterConfig_LogLevel.DEBUG; - case 3: - case "INFORMATION": - return GreenplumMasterConfig_LogLevel.INFORMATION; - case 4: - case "WARNING": - return GreenplumMasterConfig_LogLevel.WARNING; - case 5: - case "ERROR": - return GreenplumMasterConfig_LogLevel.ERROR; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumMasterConfig_LogLevel.UNRECOGNIZED; - } -} - -export function greenplumMasterConfig_LogLevelToJSON( - object: GreenplumMasterConfig_LogLevel -): string { - switch (object) { - case GreenplumMasterConfig_LogLevel.LOG_LEVEL_UNSPECIFIED: - return "LOG_LEVEL_UNSPECIFIED"; - case GreenplumMasterConfig_LogLevel.TRACE: - return "TRACE"; - case GreenplumMasterConfig_LogLevel.DEBUG: - return "DEBUG"; - case GreenplumMasterConfig_LogLevel.INFORMATION: - return "INFORMATION"; - case GreenplumMasterConfig_LogLevel.WARNING: - return "WARNING"; - case GreenplumMasterConfig_LogLevel.ERROR: - return "ERROR"; - default: - return "UNKNOWN"; - } -} - -export enum GreenplumMasterConfig_LogStatement { - LOG_STATEMENT_UNSPECIFIED = 0, - NONE = 1, - DDL = 2, - MOD = 3, - ALL = 4, - UNRECOGNIZED = -1, -} - -export function greenplumMasterConfig_LogStatementFromJSON( - object: any -): GreenplumMasterConfig_LogStatement { - switch (object) { - case 0: - case "LOG_STATEMENT_UNSPECIFIED": - return GreenplumMasterConfig_LogStatement.LOG_STATEMENT_UNSPECIFIED; - case 1: - case "NONE": - return GreenplumMasterConfig_LogStatement.NONE; - case 2: - case "DDL": - return GreenplumMasterConfig_LogStatement.DDL; - case 3: - case "MOD": - return GreenplumMasterConfig_LogStatement.MOD; - case 4: - case "ALL": - return GreenplumMasterConfig_LogStatement.ALL; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumMasterConfig_LogStatement.UNRECOGNIZED; - } -} - -export function greenplumMasterConfig_LogStatementToJSON( - object: GreenplumMasterConfig_LogStatement -): string { - switch (object) { - case GreenplumMasterConfig_LogStatement.LOG_STATEMENT_UNSPECIFIED: - return "LOG_STATEMENT_UNSPECIFIED"; - case GreenplumMasterConfig_LogStatement.NONE: - return "NONE"; - case GreenplumMasterConfig_LogStatement.DDL: - return "DDL"; - case GreenplumMasterConfig_LogStatement.MOD: - return "MOD"; - case GreenplumMasterConfig_LogStatement.ALL: - return "ALL"; - default: - return "UNKNOWN"; - } -} - -export enum GreenplumMasterConfig_AutostatsModeInFunctions { - AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED = 0, - MODE_NONE = 1, - ON_CHANGE = 2, - ON_NO_STATS = 3, - UNRECOGNIZED = -1, -} - -export function greenplumMasterConfig_AutostatsModeInFunctionsFromJSON( - object: any -): GreenplumMasterConfig_AutostatsModeInFunctions { - switch (object) { - case 0: - case 
"AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED": - return GreenplumMasterConfig_AutostatsModeInFunctions.AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED; - case 1: - case "MODE_NONE": - return GreenplumMasterConfig_AutostatsModeInFunctions.MODE_NONE; - case 2: - case "ON_CHANGE": - return GreenplumMasterConfig_AutostatsModeInFunctions.ON_CHANGE; - case 3: - case "ON_NO_STATS": - return GreenplumMasterConfig_AutostatsModeInFunctions.ON_NO_STATS; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumMasterConfig_AutostatsModeInFunctions.UNRECOGNIZED; - } -} - -export function greenplumMasterConfig_AutostatsModeInFunctionsToJSON( - object: GreenplumMasterConfig_AutostatsModeInFunctions -): string { - switch (object) { - case GreenplumMasterConfig_AutostatsModeInFunctions.AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED: - return "AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED"; - case GreenplumMasterConfig_AutostatsModeInFunctions.MODE_NONE: - return "MODE_NONE"; - case GreenplumMasterConfig_AutostatsModeInFunctions.ON_CHANGE: - return "ON_CHANGE"; - case GreenplumMasterConfig_AutostatsModeInFunctions.ON_NO_STATS: - return "ON_NO_STATS"; - default: - return "UNKNOWN"; - } } -/** - * Greenplum segment subcluster configuration options. Detailed description for each set of options - * - * Any options not listed here are not supported. - */ -export interface GreenplumSegmentConfig { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfig"; - /** Logging level for the Greenplum segment subcluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR. */ - logLevel: GreenplumSegmentConfig_LogLevel; - /** Maximum number of inbound connections. */ +export interface Greenplumconfig619 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19"; + /** Maximum number of inbound connections on master segment */ maxConnections?: number; /** * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. @@ -414,151 +169,54 @@ export interface GreenplumSegmentConfig { */ gpWorkfileLimitFilesPerQuery?: number; /** - * Identifies the resource management scheme currently enabled in the Greenplum Database cluster. The default scheme is to use resource queues. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_resource_manager - * "group" is the default value - */ - gpResourceManager: GreenplumSegmentConfig_GPResourceManager; - /** - * Identifies the maximum percentage of system CPU resources to allocate to resource groups on each Greenplum Database segment node. - * Note: The gp_resource_group_cpu_limit server configuration parameter is enforced only when resource group-based resource management is active. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_resource_group_cpu_limit + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html */ - gpResourceGroupCpuLimit?: number; + maxPreparedTransactions?: number; /** - * Identifies the maximum percentage of system memory resources to allocate to resource groups on each Greenplum Database segment node. - * Note: The gp_resource_group_memory_limit server configuration parameter is enforced only when resource group-based resource management is active. 
- * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_resource_group_memory_limit + * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression */ - gpResourceGroupMemoryLimit?: number; -} - -export enum GreenplumSegmentConfig_LogLevel { - LOG_LEVEL_UNSPECIFIED = 0, - TRACE = 1, - DEBUG = 2, - INFORMATION = 3, - WARNING = 4, - ERROR = 5, - UNRECOGNIZED = -1, -} - -export function greenplumSegmentConfig_LogLevelFromJSON( - object: any -): GreenplumSegmentConfig_LogLevel { - switch (object) { - case 0: - case "LOG_LEVEL_UNSPECIFIED": - return GreenplumSegmentConfig_LogLevel.LOG_LEVEL_UNSPECIFIED; - case 1: - case "TRACE": - return GreenplumSegmentConfig_LogLevel.TRACE; - case 2: - case "DEBUG": - return GreenplumSegmentConfig_LogLevel.DEBUG; - case 3: - case "INFORMATION": - return GreenplumSegmentConfig_LogLevel.INFORMATION; - case 4: - case "WARNING": - return GreenplumSegmentConfig_LogLevel.WARNING; - case 5: - case "ERROR": - return GreenplumSegmentConfig_LogLevel.ERROR; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumSegmentConfig_LogLevel.UNRECOGNIZED; - } -} - -export function greenplumSegmentConfig_LogLevelToJSON( - object: GreenplumSegmentConfig_LogLevel -): string { - switch (object) { - case GreenplumSegmentConfig_LogLevel.LOG_LEVEL_UNSPECIFIED: - return "LOG_LEVEL_UNSPECIFIED"; - case GreenplumSegmentConfig_LogLevel.TRACE: - return "TRACE"; - case GreenplumSegmentConfig_LogLevel.DEBUG: - return "DEBUG"; - case GreenplumSegmentConfig_LogLevel.INFORMATION: - return "INFORMATION"; - case GreenplumSegmentConfig_LogLevel.WARNING: - return "WARNING"; - case GreenplumSegmentConfig_LogLevel.ERROR: - return "ERROR"; - default: - return "UNKNOWN"; - } -} - -export enum GreenplumSegmentConfig_GPResourceManager { - GP_RESOURCE_MANAGER_UNSPECIFIED = 0, - QUEUE = 1, - GROUP = 2, - UNRECOGNIZED = -1, -} - -export function greenplumSegmentConfig_GPResourceManagerFromJSON( - object: any -): GreenplumSegmentConfig_GPResourceManager { - switch (object) { - case 0: - case "GP_RESOURCE_MANAGER_UNSPECIFIED": - return GreenplumSegmentConfig_GPResourceManager.GP_RESOURCE_MANAGER_UNSPECIFIED; - case 1: - case "QUEUE": - return GreenplumSegmentConfig_GPResourceManager.QUEUE; - case 2: - case "GROUP": - return GreenplumSegmentConfig_GPResourceManager.GROUP; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumSegmentConfig_GPResourceManager.UNRECOGNIZED; - } + gpWorkfileCompression?: boolean; } -export function greenplumSegmentConfig_GPResourceManagerToJSON( - object: GreenplumSegmentConfig_GPResourceManager -): string { - switch (object) { - case GreenplumSegmentConfig_GPResourceManager.GP_RESOURCE_MANAGER_UNSPECIFIED: - return "GP_RESOURCE_MANAGER_UNSPECIFIED"; - case GreenplumSegmentConfig_GPResourceManager.QUEUE: - return "QUEUE"; - case GreenplumSegmentConfig_GPResourceManager.GROUP: - return "GROUP"; - default: - return "UNKNOWN"; - } +export interface Greenplumconfigset617 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17"; + /** + * Effective settings for a Greenplum (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Greenplumconfig617; + /** User-defined settings for a Greenplum. */ + userConfig?: Greenplumconfig617; + /** Default configuration for a Greenplum. 
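A short sketch of how the three views in a config set are typically read together. It assumes a Greenplumconfigset617 value has already been obtained (for example from a cluster's configuration, which is outside this file); the import path is illustrative.

import {
  Greenplumconfig617,
  Greenplumconfigset617,
} from "./generated/yandex/cloud/mdb/greenplum/v1/config";

// Lists the settings the user overrode explicitly and the value actually in
// effect for each of them. toJSON() emits only fields that are set, so
// iterating over the user view yields exactly the overridden settings.
function describeOverrides(configSet: Greenplumconfigset617): void {
  if (!configSet.userConfig || !configSet.effectiveConfig) return;
  const user = Greenplumconfig617.toJSON(configSet.userConfig) as Record<string, unknown>;
  const effective = Greenplumconfig617.toJSON(configSet.effectiveConfig) as Record<string, unknown>;
  for (const [key, value] of Object.entries(user)) {
    console.log(key, "user:", value, "effective:", effective[key]);
  }
}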
*/ + defaultConfig?: Greenplumconfig617; } -export interface GreenplumMasterConfigSet { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfigSet"; +export interface Greenplumconfigset619 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19"; /** - * Effective settings for a Greenplum master subcluster (a combination of settings defined + * Effective settings for a Greenplum (a combination of settings defined * in [user_config] and [default_config]). */ - effectiveConfig?: GreenplumMasterConfig; - /** User-defined settings for a Greenplum master subcluster. */ - userConfig?: GreenplumMasterConfig; - /** Default configuration for a Greenplum master subcluster. */ - defaultConfig?: GreenplumMasterConfig; + effectiveConfig?: Greenplumconfig619; + /** User-defined settings for a Greenplum. */ + userConfig?: Greenplumconfig619; + /** Default configuration for a Greenplum. */ + defaultConfig?: Greenplumconfig619; } -export interface GreenplumSegmentConfigSet { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfigSet"; +export interface ConnectionPoolerConfigSet { + $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet"; /** - * Effective settings for a Greenplum segment subcluster (a combination of settings defined + * Effective settings for a odyssey (a combination of settings defined * in [user_config] and [default_config]). */ - effectiveConfig?: GreenplumSegmentConfig; - /** User-defined settings for a Greenplum segment subcluster. */ - userConfig?: GreenplumSegmentConfig; - /** Default configuration for a Greenplum segment subcluster. */ - defaultConfig?: GreenplumSegmentConfig; + effectiveConfig?: ConnectionPoolerConfig; + /** User-defined settings for a odyssey. */ + userConfig?: ConnectionPoolerConfig; + /** Default configuration for a odyssey. */ + defaultConfig?: ConnectionPoolerConfig; } const baseResources: object = { @@ -769,12 +427,6 @@ export const MasterSubclusterConfig = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumMasterConfigSet.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -791,12 +443,6 @@ export const MasterSubclusterConfig = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumMasterConfigSet.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -811,10 +457,6 @@ export const MasterSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumMasterConfigSet.fromJSON(object.config) - : undefined; return message; }, @@ -824,10 +466,6 @@ export const MasterSubclusterConfig = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumMasterConfigSet.toJSON(message.config) - : undefined); return obj; }, @@ -839,10 +477,6 @@ export const MasterSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? 
GreenplumMasterConfigSet.fromPartial(object.config) - : undefined; return message; }, }; @@ -863,12 +497,6 @@ export const SegmentSubclusterConfig = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumSegmentConfigSet.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -887,12 +515,6 @@ export const SegmentSubclusterConfig = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumSegmentConfigSet.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -909,10 +531,6 @@ export const SegmentSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfigSet.fromJSON(object.config) - : undefined; return message; }, @@ -922,10 +540,6 @@ export const SegmentSubclusterConfig = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumSegmentConfigSet.toJSON(message.config) - : undefined); return obj; }, @@ -939,148 +553,72 @@ export const SegmentSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfigSet.fromPartial(object.config) - : undefined; return message; }, }; messageTypeRegistry.set(SegmentSubclusterConfig.$type, SegmentSubclusterConfig); -const baseGreenplumMasterConfig: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfig", - logLevel: 0, - logStatement: 0, - gpAutostatsModeInFunctions: 0, +const baseGreenplumconfig617: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17", }; -export const GreenplumMasterConfig = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfig" as const, +export const Greenplumconfig617 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17" as const, encode( - message: GreenplumMasterConfig, + message: Greenplumconfig617, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.logLevel !== 0) { - writer.uint32(8).int32(message.logLevel); - } if (message.maxConnections !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.timezone !== undefined) { - StringValue.encode( - { $type: "google.protobuf.StringValue", value: message.timezone! 
}, - writer.uint32(26).fork() - ).ldelim(); - } - if (message.pool !== undefined) { - ConnectionPoolerConfig.encode( - message.pool, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.maxPreparedTransactions !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxPreparedTransactions!, - }, - writer.uint32(106).fork() - ).ldelim(); - } - if (message.runawayDetectorActivationPercent !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.runawayDetectorActivationPercent!, - }, - writer.uint32(114).fork() - ).ldelim(); - } - if (message.tcpKeepalivesCount !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.tcpKeepalivesCount!, - }, - writer.uint32(122).fork() - ).ldelim(); - } - if (message.tcpKeepalivesInterval !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.tcpKeepalivesInterval!, - }, - writer.uint32(130).fork() + writer.uint32(10).fork() ).ldelim(); } - if (message.readableExternalTableTimeout !== undefined) { + if (message.maxSlotWalKeepSize !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.readableExternalTableTimeout!, + value: message.maxSlotWalKeepSize!, }, - writer.uint32(154).fork() + writer.uint32(18).fork() ).ldelim(); } - if (message.gpInterconnectSndQueueDepth !== undefined) { + if (message.gpWorkfileLimitPerSegment !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpInterconnectSndQueueDepth!, + value: message.gpWorkfileLimitPerSegment!, }, - writer.uint32(162).fork() + writer.uint32(26).fork() ).ldelim(); } - if (message.gpInterconnectQueueDepth !== undefined) { + if (message.gpWorkfileLimitPerQuery !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpInterconnectQueueDepth!, - }, - writer.uint32(170).fork() - ).ldelim(); - } - if (message.logStatement !== 0) { - writer.uint32(176).int32(message.logStatement); - } - if (message.logDuration !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.logDuration! 
}, - writer.uint32(186).fork() - ).ldelim(); - } - if (message.optimizerAnalyzeRootPartition !== undefined) { - BoolValue.encode( - { - $type: "google.protobuf.BoolValue", - value: message.optimizerAnalyzeRootPartition!, + value: message.gpWorkfileLimitPerQuery!, }, - writer.uint32(194).fork() + writer.uint32(34).fork() ).ldelim(); } - if (message.gpExternalMaxSegs !== undefined) { + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpExternalMaxSegs!, + value: message.gpWorkfileLimitFilesPerQuery!, }, - writer.uint32(202).fork() + writer.uint32(42).fork() ).ldelim(); } - if (message.gpFtsProbeTimeout !== undefined) { + if (message.maxPreparedTransactions !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpFtsProbeTimeout!, + value: message.maxPreparedTransactions!, }, - writer.uint32(210).fork() + writer.uint32(50).fork() ).ldelim(); } if (message.gpWorkfileCompression !== undefined) { @@ -1089,115 +627,61 @@ export const GreenplumMasterConfig = { $type: "google.protobuf.BoolValue", value: message.gpWorkfileCompression!, }, - writer.uint32(218).fork() + writer.uint32(58).fork() ).ldelim(); } - if (message.gpAutostatsModeInFunctions !== 0) { - writer.uint32(224).int32(message.gpAutostatsModeInFunctions); - } return writer; }, - decode( - input: _m0.Reader | Uint8Array, - length?: number - ): GreenplumMasterConfig { + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig617 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseGreenplumMasterConfig } as GreenplumMasterConfig; + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.logLevel = reader.int32() as any; - break; - case 2: message.maxConnections = Int64Value.decode( reader, reader.uint32() ).value; break; - case 3: - message.timezone = StringValue.decode(reader, reader.uint32()).value; - break; - case 4: - message.pool = ConnectionPoolerConfig.decode(reader, reader.uint32()); - break; - case 13: - message.maxPreparedTransactions = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 14: - message.runawayDetectorActivationPercent = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 15: - message.tcpKeepalivesCount = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 16: - message.tcpKeepalivesInterval = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 19: - message.readableExternalTableTimeout = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 20: - message.gpInterconnectSndQueueDepth = Int64Value.decode( + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( reader, reader.uint32() ).value; break; - case 21: - message.gpInterconnectQueueDepth = Int64Value.decode( + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( reader, reader.uint32() ).value; break; - case 22: - message.logStatement = reader.int32() as any; - break; - case 23: - message.logDuration = BoolValue.decode(reader, reader.uint32()).value; - break; - case 24: - message.optimizerAnalyzeRootPartition = BoolValue.decode( + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 25: - message.gpExternalMaxSegs = 
Int64Value.decode( + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 26: - message.gpFtsProbeTimeout = Int64Value.decode( + case 6: + message.maxPreparedTransactions = Int64Value.decode( reader, reader.uint32() ).value; break; - case 27: + case 7: message.gpWorkfileCompression = BoolValue.decode( reader, reader.uint32() ).value; break; - case 28: - message.gpAutostatsModeInFunctions = reader.int32() as any; - break; default: reader.skipType(tag & 7); break; @@ -1206,203 +690,100 @@ export const GreenplumMasterConfig = { return message; }, - fromJSON(object: any): GreenplumMasterConfig { - const message = { ...baseGreenplumMasterConfig } as GreenplumMasterConfig; - message.logLevel = - object.logLevel !== undefined && object.logLevel !== null - ? greenplumMasterConfig_LogLevelFromJSON(object.logLevel) - : 0; + fromJSON(object: any): Greenplumconfig617 { + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; message.maxConnections = object.maxConnections !== undefined && object.maxConnections !== null ? Number(object.maxConnections) : undefined; - message.timezone = - object.timezone !== undefined && object.timezone !== null - ? String(object.timezone) + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? Number(object.gpWorkfileLimitPerSegment) : undefined; - message.pool = - object.pool !== undefined && object.pool !== null - ? ConnectionPoolerConfig.fromJSON(object.pool) + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? Number(object.gpWorkfileLimitFilesPerQuery) : undefined; message.maxPreparedTransactions = object.maxPreparedTransactions !== undefined && object.maxPreparedTransactions !== null ? Number(object.maxPreparedTransactions) : undefined; - message.runawayDetectorActivationPercent = - object.runawayDetectorActivationPercent !== undefined && - object.runawayDetectorActivationPercent !== null - ? Number(object.runawayDetectorActivationPercent) - : undefined; - message.tcpKeepalivesCount = - object.tcpKeepalivesCount !== undefined && - object.tcpKeepalivesCount !== null - ? Number(object.tcpKeepalivesCount) - : undefined; - message.tcpKeepalivesInterval = - object.tcpKeepalivesInterval !== undefined && - object.tcpKeepalivesInterval !== null - ? Number(object.tcpKeepalivesInterval) - : undefined; - message.readableExternalTableTimeout = - object.readableExternalTableTimeout !== undefined && - object.readableExternalTableTimeout !== null - ? Number(object.readableExternalTableTimeout) - : undefined; - message.gpInterconnectSndQueueDepth = - object.gpInterconnectSndQueueDepth !== undefined && - object.gpInterconnectSndQueueDepth !== null - ? Number(object.gpInterconnectSndQueueDepth) - : undefined; - message.gpInterconnectQueueDepth = - object.gpInterconnectQueueDepth !== undefined && - object.gpInterconnectQueueDepth !== null - ? Number(object.gpInterconnectQueueDepth) - : undefined; - message.logStatement = - object.logStatement !== undefined && object.logStatement !== null - ? 
greenplumMasterConfig_LogStatementFromJSON(object.logStatement) - : 0; - message.logDuration = - object.logDuration !== undefined && object.logDuration !== null - ? Boolean(object.logDuration) - : undefined; - message.optimizerAnalyzeRootPartition = - object.optimizerAnalyzeRootPartition !== undefined && - object.optimizerAnalyzeRootPartition !== null - ? Boolean(object.optimizerAnalyzeRootPartition) - : undefined; - message.gpExternalMaxSegs = - object.gpExternalMaxSegs !== undefined && - object.gpExternalMaxSegs !== null - ? Number(object.gpExternalMaxSegs) - : undefined; - message.gpFtsProbeTimeout = - object.gpFtsProbeTimeout !== undefined && - object.gpFtsProbeTimeout !== null - ? Number(object.gpFtsProbeTimeout) - : undefined; message.gpWorkfileCompression = object.gpWorkfileCompression !== undefined && object.gpWorkfileCompression !== null ? Boolean(object.gpWorkfileCompression) : undefined; - message.gpAutostatsModeInFunctions = - object.gpAutostatsModeInFunctions !== undefined && - object.gpAutostatsModeInFunctions !== null - ? greenplumMasterConfig_AutostatsModeInFunctionsFromJSON( - object.gpAutostatsModeInFunctions - ) - : 0; return message; }, - toJSON(message: GreenplumMasterConfig): unknown { + toJSON(message: Greenplumconfig617): unknown { const obj: any = {}; - message.logLevel !== undefined && - (obj.logLevel = greenplumMasterConfig_LogLevelToJSON(message.logLevel)); message.maxConnections !== undefined && (obj.maxConnections = message.maxConnections); - message.timezone !== undefined && (obj.timezone = message.timezone); - message.pool !== undefined && - (obj.pool = message.pool - ? ConnectionPoolerConfig.toJSON(message.pool) - : undefined); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); message.maxPreparedTransactions !== undefined && (obj.maxPreparedTransactions = message.maxPreparedTransactions); - message.runawayDetectorActivationPercent !== undefined && - (obj.runawayDetectorActivationPercent = - message.runawayDetectorActivationPercent); - message.tcpKeepalivesCount !== undefined && - (obj.tcpKeepalivesCount = message.tcpKeepalivesCount); - message.tcpKeepalivesInterval !== undefined && - (obj.tcpKeepalivesInterval = message.tcpKeepalivesInterval); - message.readableExternalTableTimeout !== undefined && - (obj.readableExternalTableTimeout = message.readableExternalTableTimeout); - message.gpInterconnectSndQueueDepth !== undefined && - (obj.gpInterconnectSndQueueDepth = message.gpInterconnectSndQueueDepth); - message.gpInterconnectQueueDepth !== undefined && - (obj.gpInterconnectQueueDepth = message.gpInterconnectQueueDepth); - message.logStatement !== undefined && - (obj.logStatement = greenplumMasterConfig_LogStatementToJSON( - message.logStatement - )); - message.logDuration !== undefined && - (obj.logDuration = message.logDuration); - message.optimizerAnalyzeRootPartition !== undefined && - (obj.optimizerAnalyzeRootPartition = - message.optimizerAnalyzeRootPartition); - message.gpExternalMaxSegs !== undefined && - (obj.gpExternalMaxSegs = message.gpExternalMaxSegs); - message.gpFtsProbeTimeout !== undefined && - (obj.gpFtsProbeTimeout = 
message.gpFtsProbeTimeout); message.gpWorkfileCompression !== undefined && (obj.gpWorkfileCompression = message.gpWorkfileCompression); - message.gpAutostatsModeInFunctions !== undefined && - (obj.gpAutostatsModeInFunctions = - greenplumMasterConfig_AutostatsModeInFunctionsToJSON( - message.gpAutostatsModeInFunctions - )); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumMasterConfig { - const message = { ...baseGreenplumMasterConfig } as GreenplumMasterConfig; - message.logLevel = object.logLevel ?? 0; + ): Greenplumconfig617 { + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; message.maxConnections = object.maxConnections ?? undefined; - message.timezone = object.timezone ?? undefined; - message.pool = - object.pool !== undefined && object.pool !== null - ? ConnectionPoolerConfig.fromPartial(object.pool) - : undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; message.maxPreparedTransactions = object.maxPreparedTransactions ?? undefined; - message.runawayDetectorActivationPercent = - object.runawayDetectorActivationPercent ?? undefined; - message.tcpKeepalivesCount = object.tcpKeepalivesCount ?? undefined; - message.tcpKeepalivesInterval = object.tcpKeepalivesInterval ?? undefined; - message.readableExternalTableTimeout = - object.readableExternalTableTimeout ?? undefined; - message.gpInterconnectSndQueueDepth = - object.gpInterconnectSndQueueDepth ?? undefined; - message.gpInterconnectQueueDepth = - object.gpInterconnectQueueDepth ?? undefined; - message.logStatement = object.logStatement ?? 0; - message.logDuration = object.logDuration ?? undefined; - message.optimizerAnalyzeRootPartition = - object.optimizerAnalyzeRootPartition ?? undefined; - message.gpExternalMaxSegs = object.gpExternalMaxSegs ?? undefined; - message.gpFtsProbeTimeout = object.gpFtsProbeTimeout ?? undefined; message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; - message.gpAutostatsModeInFunctions = object.gpAutostatsModeInFunctions ?? 0; return message; }, }; -messageTypeRegistry.set(GreenplumMasterConfig.$type, GreenplumMasterConfig); +messageTypeRegistry.set(Greenplumconfig617.$type, Greenplumconfig617); -const baseGreenplumSegmentConfig: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfig", - logLevel: 0, - gpResourceManager: 0, +const baseGreenplumconfig619: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19", }; -export const GreenplumSegmentConfig = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfig" as const, +export const Greenplumconfig619 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19" as const, encode( - message: GreenplumSegmentConfig, + message: Greenplumconfig619, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.logLevel !== 0) { - writer.uint32(8).int32(message.logLevel); - } if (message.maxConnections !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", value: message.maxConnections! 
}, - writer.uint32(18).fork() + writer.uint32(10).fork() ).ldelim(); } if (message.maxSlotWalKeepSize !== undefined) { @@ -1411,7 +792,7 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.maxSlotWalKeepSize!, }, - writer.uint32(58).fork() + writer.uint32(18).fork() ).ldelim(); } if (message.gpWorkfileLimitPerSegment !== undefined) { @@ -1420,7 +801,7 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.gpWorkfileLimitPerSegment!, }, - writer.uint32(66).fork() + writer.uint32(26).fork() ).ldelim(); } if (message.gpWorkfileLimitPerQuery !== undefined) { @@ -1429,7 +810,7 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.gpWorkfileLimitPerQuery!, }, - writer.uint32(74).fork() + writer.uint32(34).fork() ).ldelim(); } if (message.gpWorkfileLimitFilesPerQuery !== undefined) { @@ -1438,87 +819,75 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.gpWorkfileLimitFilesPerQuery!, }, - writer.uint32(82).fork() + writer.uint32(42).fork() ).ldelim(); } - if (message.gpResourceManager !== 0) { - writer.uint32(88).int32(message.gpResourceManager); - } - if (message.gpResourceGroupCpuLimit !== undefined) { - FloatValue.encode( + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( { - $type: "google.protobuf.FloatValue", - value: message.gpResourceGroupCpuLimit!, + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, }, - writer.uint32(138).fork() + writer.uint32(50).fork() ).ldelim(); } - if (message.gpResourceGroupMemoryLimit !== undefined) { - FloatValue.encode( + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( { - $type: "google.protobuf.FloatValue", - value: message.gpResourceGroupMemoryLimit!, + $type: "google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, }, - writer.uint32(146).fork() + writer.uint32(58).fork() ).ldelim(); } return writer; }, - decode( - input: _m0.Reader | Uint8Array, - length?: number - ): GreenplumSegmentConfig { + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig619 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseGreenplumSegmentConfig } as GreenplumSegmentConfig; + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.logLevel = reader.int32() as any; - break; - case 2: message.maxConnections = Int64Value.decode( reader, reader.uint32() ).value; break; - case 7: + case 2: message.maxSlotWalKeepSize = Int64Value.decode( reader, reader.uint32() ).value; break; - case 8: + case 3: message.gpWorkfileLimitPerSegment = Int64Value.decode( reader, reader.uint32() ).value; break; - case 9: + case 4: message.gpWorkfileLimitPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 10: + case 5: message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 11: - message.gpResourceManager = reader.int32() as any; - break; - case 17: - message.gpResourceGroupCpuLimit = FloatValue.decode( + case 6: + message.maxPreparedTransactions = Int64Value.decode( reader, reader.uint32() ).value; break; - case 18: - message.gpResourceGroupMemoryLimit = FloatValue.decode( + case 7: + message.gpWorkfileCompression = BoolValue.decode( reader, reader.uint32() ).value; @@ -1531,12 +900,8 @@ export const GreenplumSegmentConfig = { return message; }, - fromJSON(object: any): GreenplumSegmentConfig { - const message = { ...baseGreenplumSegmentConfig } as GreenplumSegmentConfig; - message.logLevel = - object.logLevel !== undefined && object.logLevel !== null - ? greenplumSegmentConfig_LogLevelFromJSON(object.logLevel) - : 0; + fromJSON(object: any): Greenplumconfig619 { + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; message.maxConnections = object.maxConnections !== undefined && object.maxConnections !== null ? Number(object.maxConnections) @@ -1561,30 +926,21 @@ export const GreenplumSegmentConfig = { object.gpWorkfileLimitFilesPerQuery !== null ? Number(object.gpWorkfileLimitFilesPerQuery) : undefined; - message.gpResourceManager = - object.gpResourceManager !== undefined && - object.gpResourceManager !== null - ? greenplumSegmentConfig_GPResourceManagerFromJSON( - object.gpResourceManager - ) - : 0; - message.gpResourceGroupCpuLimit = - object.gpResourceGroupCpuLimit !== undefined && - object.gpResourceGroupCpuLimit !== null - ? Number(object.gpResourceGroupCpuLimit) + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) : undefined; - message.gpResourceGroupMemoryLimit = - object.gpResourceGroupMemoryLimit !== undefined && - object.gpResourceGroupMemoryLimit !== null - ? Number(object.gpResourceGroupMemoryLimit) + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? 
Boolean(object.gpWorkfileCompression) : undefined; return message; }, - toJSON(message: GreenplumSegmentConfig): unknown { + toJSON(message: Greenplumconfig619): unknown { const obj: any = {}; - message.logLevel !== undefined && - (obj.logLevel = greenplumSegmentConfig_LogLevelToJSON(message.logLevel)); message.maxConnections !== undefined && (obj.maxConnections = message.maxConnections); message.maxSlotWalKeepSize !== undefined && @@ -1595,22 +951,17 @@ export const GreenplumSegmentConfig = { (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); message.gpWorkfileLimitFilesPerQuery !== undefined && (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); - message.gpResourceManager !== undefined && - (obj.gpResourceManager = greenplumSegmentConfig_GPResourceManagerToJSON( - message.gpResourceManager - )); - message.gpResourceGroupCpuLimit !== undefined && - (obj.gpResourceGroupCpuLimit = message.gpResourceGroupCpuLimit); - message.gpResourceGroupMemoryLimit !== undefined && - (obj.gpResourceGroupMemoryLimit = message.gpResourceGroupMemoryLimit); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumSegmentConfig { - const message = { ...baseGreenplumSegmentConfig } as GreenplumSegmentConfig; - message.logLevel = object.logLevel ?? 0; + ): Greenplumconfig619 { + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; message.maxConnections = object.maxConnections ?? undefined; message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; message.gpWorkfileLimitPerSegment = @@ -1619,42 +970,40 @@ export const GreenplumSegmentConfig = { object.gpWorkfileLimitPerQuery ?? undefined; message.gpWorkfileLimitFilesPerQuery = object.gpWorkfileLimitFilesPerQuery ?? undefined; - message.gpResourceManager = object.gpResourceManager ?? 0; - message.gpResourceGroupCpuLimit = - object.gpResourceGroupCpuLimit ?? undefined; - message.gpResourceGroupMemoryLimit = - object.gpResourceGroupMemoryLimit ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? 
undefined; return message; }, }; -messageTypeRegistry.set(GreenplumSegmentConfig.$type, GreenplumSegmentConfig); +messageTypeRegistry.set(Greenplumconfig619.$type, Greenplumconfig619); -const baseGreenplumMasterConfigSet: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfigSet", +const baseGreenplumconfigset617: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17", }; -export const GreenplumMasterConfigSet = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfigSet" as const, +export const Greenplumconfigset617 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17" as const, encode( - message: GreenplumMasterConfigSet, + message: Greenplumconfigset617, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.effectiveConfig !== undefined) { - GreenplumMasterConfig.encode( + Greenplumconfig617.encode( message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } if (message.userConfig !== undefined) { - GreenplumMasterConfig.encode( + Greenplumconfig617.encode( message.userConfig, writer.uint32(18).fork() ).ldelim(); } if (message.defaultConfig !== undefined) { - GreenplumMasterConfig.encode( + Greenplumconfig617.encode( message.defaultConfig, writer.uint32(26).fork() ).ldelim(); @@ -1665,29 +1014,27 @@ export const GreenplumMasterConfigSet = { decode( input: _m0.Reader | Uint8Array, length?: number - ): GreenplumMasterConfigSet { + ): Greenplumconfigset617 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { - ...baseGreenplumMasterConfigSet, - } as GreenplumMasterConfigSet; + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.effectiveConfig = GreenplumMasterConfig.decode( + message.effectiveConfig = Greenplumconfig617.decode( reader, reader.uint32() ); break; case 2: - message.userConfig = GreenplumMasterConfig.decode( + message.userConfig = Greenplumconfig617.decode( reader, reader.uint32() ); break; case 3: - message.defaultConfig = GreenplumMasterConfig.decode( + message.defaultConfig = Greenplumconfig617.decode( reader, reader.uint32() ); @@ -1700,94 +1047,211 @@ export const GreenplumMasterConfigSet = { return message; }, - fromJSON(object: any): GreenplumMasterConfigSet { - const message = { - ...baseGreenplumMasterConfigSet, - } as GreenplumMasterConfigSet; + fromJSON(object: any): Greenplumconfigset617 { + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumMasterConfig.fromJSON(object.effectiveConfig) + ? Greenplumconfig617.fromJSON(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumMasterConfig.fromJSON(object.userConfig) + ? Greenplumconfig617.fromJSON(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumMasterConfig.fromJSON(object.defaultConfig) + ? Greenplumconfig617.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: GreenplumMasterConfigSet): unknown { + toJSON(message: Greenplumconfigset617): unknown { const obj: any = {}; message.effectiveConfig !== undefined && (obj.effectiveConfig = message.effectiveConfig - ? 
GreenplumMasterConfig.toJSON(message.effectiveConfig) + ? Greenplumconfig617.toJSON(message.effectiveConfig) : undefined); message.userConfig !== undefined && (obj.userConfig = message.userConfig - ? GreenplumMasterConfig.toJSON(message.userConfig) + ? Greenplumconfig617.toJSON(message.userConfig) : undefined); message.defaultConfig !== undefined && (obj.defaultConfig = message.defaultConfig - ? GreenplumMasterConfig.toJSON(message.defaultConfig) + ? Greenplumconfig617.toJSON(message.defaultConfig) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumMasterConfigSet { - const message = { - ...baseGreenplumMasterConfigSet, - } as GreenplumMasterConfigSet; + ): Greenplumconfigset617 { + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumMasterConfig.fromPartial(object.effectiveConfig) + ? Greenplumconfig617.fromPartial(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumMasterConfig.fromPartial(object.userConfig) + ? Greenplumconfig617.fromPartial(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumMasterConfig.fromPartial(object.defaultConfig) + ? Greenplumconfig617.fromPartial(object.defaultConfig) : undefined; return message; }, }; -messageTypeRegistry.set( - GreenplumMasterConfigSet.$type, - GreenplumMasterConfigSet -); +messageTypeRegistry.set(Greenplumconfigset617.$type, Greenplumconfigset617); + +const baseGreenplumconfigset619: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19", +}; + +export const Greenplumconfigset619 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19" as const, + + encode( + message: Greenplumconfigset619, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Greenplumconfig619.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Greenplumconfig619.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Greenplumconfig619.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Greenplumconfigset619 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Greenplumconfigset619 { + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? 
Greenplumconfig619.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig619.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig619.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Greenplumconfigset619): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Greenplumconfig619.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Greenplumconfig619.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Greenplumconfig619.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfigset619 { + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig619.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig619.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig619.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfigset619.$type, Greenplumconfigset619); -const baseGreenplumSegmentConfigSet: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfigSet", +const baseConnectionPoolerConfigSet: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet", }; -export const GreenplumSegmentConfigSet = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfigSet" as const, +export const ConnectionPoolerConfigSet = { + $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet" as const, encode( - message: GreenplumSegmentConfigSet, + message: ConnectionPoolerConfigSet, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.effectiveConfig !== undefined) { - GreenplumSegmentConfig.encode( + ConnectionPoolerConfig.encode( message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } if (message.userConfig !== undefined) { - GreenplumSegmentConfig.encode( + ConnectionPoolerConfig.encode( message.userConfig, writer.uint32(18).fork() ).ldelim(); } if (message.defaultConfig !== undefined) { - GreenplumSegmentConfig.encode( + ConnectionPoolerConfig.encode( message.defaultConfig, writer.uint32(26).fork() ).ldelim(); @@ -1798,29 +1262,29 @@ export const GreenplumSegmentConfigSet = { decode( input: _m0.Reader | Uint8Array, length?: number - ): GreenplumSegmentConfigSet { + ): ConnectionPoolerConfigSet { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { - ...baseGreenplumSegmentConfigSet, - } as GreenplumSegmentConfigSet; + ...baseConnectionPoolerConfigSet, + } as ConnectionPoolerConfigSet; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.effectiveConfig = GreenplumSegmentConfig.decode( + message.effectiveConfig = ConnectionPoolerConfig.decode( reader, reader.uint32() ); break; case 2: - message.userConfig = GreenplumSegmentConfig.decode( + message.userConfig = ConnectionPoolerConfig.decode( reader, reader.uint32() ); break; case 3: - message.defaultConfig = GreenplumSegmentConfig.decode( + message.defaultConfig = ConnectionPoolerConfig.decode( reader, reader.uint32() ); @@ -1833,67 +1297,67 @@ export const GreenplumSegmentConfigSet = { return message; }, - fromJSON(object: any): GreenplumSegmentConfigSet { + fromJSON(object: any): ConnectionPoolerConfigSet { const message = { - ...baseGreenplumSegmentConfigSet, - } as GreenplumSegmentConfigSet; + ...baseConnectionPoolerConfigSet, + } as ConnectionPoolerConfigSet; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumSegmentConfig.fromJSON(object.effectiveConfig) + ? ConnectionPoolerConfig.fromJSON(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumSegmentConfig.fromJSON(object.userConfig) + ? ConnectionPoolerConfig.fromJSON(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumSegmentConfig.fromJSON(object.defaultConfig) + ? ConnectionPoolerConfig.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: GreenplumSegmentConfigSet): unknown { + toJSON(message: ConnectionPoolerConfigSet): unknown { const obj: any = {}; message.effectiveConfig !== undefined && (obj.effectiveConfig = message.effectiveConfig - ? GreenplumSegmentConfig.toJSON(message.effectiveConfig) + ? ConnectionPoolerConfig.toJSON(message.effectiveConfig) : undefined); message.userConfig !== undefined && (obj.userConfig = message.userConfig - ? GreenplumSegmentConfig.toJSON(message.userConfig) + ? ConnectionPoolerConfig.toJSON(message.userConfig) : undefined); message.defaultConfig !== undefined && (obj.defaultConfig = message.defaultConfig - ? GreenplumSegmentConfig.toJSON(message.defaultConfig) + ? ConnectionPoolerConfig.toJSON(message.defaultConfig) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumSegmentConfigSet { + ): ConnectionPoolerConfigSet { const message = { - ...baseGreenplumSegmentConfigSet, - } as GreenplumSegmentConfigSet; + ...baseConnectionPoolerConfigSet, + } as ConnectionPoolerConfigSet; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumSegmentConfig.fromPartial(object.effectiveConfig) + ? ConnectionPoolerConfig.fromPartial(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumSegmentConfig.fromPartial(object.userConfig) + ? ConnectionPoolerConfig.fromPartial(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumSegmentConfig.fromPartial(object.defaultConfig) + ? 
ConnectionPoolerConfig.fromPartial(object.defaultConfig) : undefined; return message; }, }; messageTypeRegistry.set( - GreenplumSegmentConfigSet.$type, - GreenplumSegmentConfigSet + ConnectionPoolerConfigSet.$type, + ConnectionPoolerConfigSet ); declare var self: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts index 5b8432c0..183b66de 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts @@ -6,38 +6,40 @@ import { Resources } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; +/** A Greenplum® cluster host resource. */ export interface Host { $type: "yandex.cloud.mdb.greenplum.v1.Host"; /** - * Name of the Greenplum host. The host name is assigned by MDB at creation time, and cannot be changed. + * Name of the Greenplum® host. The host name is assigned by Yandex Cloud at creation time and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; - /** ID of the Greenplum cluster. The ID is assigned by MDB at creation time. */ + /** ID of the Greenplum® cluster. The ID is assigned by Yandex Cloud at creation time. */ clusterId: string; - /** ID of the availability zone where the Greenplum host resides. */ + /** ID of the availability zone the Greenplum® host belongs to. */ zoneId: string; /** Type of the host. */ type: Host_Type; - /** Resources allocated to the Greenplum host. */ + /** Resources allocated to the Greenplum® host. */ resources?: Resources; /** Status code of the aggregated health of the host. */ health: Host_Health; /** ID of the subnet that the host belongs to. */ subnetId: string; - /** Flag showing public IP assignment status to this host. */ + /** Whether or not a public IP is assigned to the host. */ assignPublicIp: boolean; } export enum Host_Type { + /** TYPE_UNSPECIFIED - The type is not specified. */ TYPE_UNSPECIFIED = 0, - /** MASTER - Greenplum master host. */ + /** MASTER - A Greenplum® master host. */ MASTER = 1, - /** REPLICA - Greenplum master host. */ + /** REPLICA - A Greenplum® master replica host. */ REPLICA = 2, - /** SEGMENT - Greenplum segment host. */ + /** SEGMENT - A Greenplum® segment host. */ SEGMENT = 3, UNRECOGNIZED = -1, } @@ -83,11 +85,11 @@ export enum Host_Health { UNKNOWN = 0, /** ALIVE - The host is performing all its functions normally. */ ALIVE = 1, - /** DEAD - The host is inoperable, and cannot perform any of its essential functions. */ + /** DEAD - The host is inoperable and cannot perform any of its essential functions. */ DEAD = 2, /** DEGRADED - The host is working below capacity or not fully functional. */ DEGRADED = 3, - /** UNBALANCED - One or more segments are not in prefer role. */ + /** UNBALANCED - One or more segments are not in preferred role. 
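For illustration, one way the Type and Health enums above are combined in practice. The host list would normally come from the cluster service's host listing call, which is not part of this file; the import path is illustrative.

import { Host, Host_Type, Host_Health } from "./generated/yandex/cloud/mdb/greenplum/v1/host";

// Returns the FQDNs of segment hosts that are not fully healthy.
function unhealthySegmentHosts(hosts: Host[]): string[] {
  return hosts
    .filter((host) => host.type === Host_Type.SEGMENT)
    .filter((host) => host.health !== Host_Health.ALIVE)
    .map((host) => host.name);
}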
*/ UNBALANCED = 4, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts index 2c23e7da..41b0a76c 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts @@ -6,20 +6,26 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; +/** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. */ export interface MaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.MaintenanceWindow"; + /** An any-time maintenance window. */ anytime?: AnytimeMaintenanceWindow | undefined; + /** A weekly maintenance window. */ weeklyMaintenanceWindow?: WeeklyMaintenanceWindow | undefined; } +/** An any-time maintenance window. */ export interface AnytimeMaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.AnytimeMaintenanceWindow"; } +/** A weekly maintenance window. */ export interface WeeklyMaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.WeeklyMaintenanceWindow"; + /** Day of the week. */ day: WeeklyMaintenanceWindow_WeekDay; - /** Hour of the day in UTC. */ + /** Hour of the day in the UTC timezone. */ hour: number; } @@ -95,9 +101,12 @@ export function weeklyMaintenanceWindow_WeekDayToJSON( } } +/** The operation to perform during maintenance. */ export interface MaintenanceOperation { $type: "yandex.cloud.mdb.greenplum.v1.MaintenanceOperation"; + /** The description of the operation, 1-256 characters long. */ info: string; + /** Delay time for the maintenance operation. */ delayedUntil?: Date; } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts new file mode 100644 index 00000000..e4bf3175 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts @@ -0,0 +1,291 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +/** A preset of resources for hardware configuration of Greenplum hosts. */ +export interface ResourcePreset { + $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset"; + /** ID of the resource preset. */ + id: string; + /** IDs of availability zones where the resource preset is available. */ + zoneIds: string[]; + /** Number of CPU cores for a Greenplum host created with the preset. */ + cores: number; + /** RAM volume for a Greenplum host created with the preset, in bytes. */ + memory: number; + /** Host type */ + type: ResourcePreset_Type; + /** Min host count */ + minHostCount: number; + /** Max host count */ + maxHostCount: number; + /** The number of hosts must be divisible by host_count_divider */ + hostCountDivider: number; + /** Max segment count in host (actual only for segment host) */ + maxSegmentInHostCount: number; +} + +export enum ResourcePreset_Type { + TYPE_UNSPECIFIED = 0, + /** MASTER - Greenplum master host. */ + MASTER = 1, + /** SEGMENT - Greenplum segment host. 
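A small sketch of how the sizing constraints carried by a resource preset (min/max host count, host count divider) can be checked before requesting a subcluster of a given size. The helper below is hypothetical and the import path is illustrative.

import { ResourcePreset } from "./generated/yandex/cloud/mdb/greenplum/v1/resource_preset";

// True when the desired number of hosts fits the preset: within
// [minHostCount, maxHostCount] and divisible by hostCountDivider.
function hostCountFits(preset: ResourcePreset, hostCount: number): boolean {
  // Treating an unset (0) divider as "no constraint" is an assumption of this sketch.
  const divider = preset.hostCountDivider || 1;
  return (
    hostCount >= preset.minHostCount &&
    hostCount <= preset.maxHostCount &&
    hostCount % divider === 0
  );
}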
*/ + SEGMENT = 2, + UNRECOGNIZED = -1, +} + +export function resourcePreset_TypeFromJSON(object: any): ResourcePreset_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return ResourcePreset_Type.TYPE_UNSPECIFIED; + case 1: + case "MASTER": + return ResourcePreset_Type.MASTER; + case 2: + case "SEGMENT": + return ResourcePreset_Type.SEGMENT; + case -1: + case "UNRECOGNIZED": + default: + return ResourcePreset_Type.UNRECOGNIZED; + } +} + +export function resourcePreset_TypeToJSON(object: ResourcePreset_Type): string { + switch (object) { + case ResourcePreset_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case ResourcePreset_Type.MASTER: + return "MASTER"; + case ResourcePreset_Type.SEGMENT: + return "SEGMENT"; + default: + return "UNKNOWN"; + } +} + +const baseResourcePreset: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset", + id: "", + zoneIds: "", + cores: 0, + memory: 0, + type: 0, + minHostCount: 0, + maxHostCount: 0, + hostCountDivider: 0, + maxSegmentInHostCount: 0, +}; + +export const ResourcePreset = { + $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset" as const, + + encode( + message: ResourcePreset, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + for (const v of message.zoneIds) { + writer.uint32(18).string(v!); + } + if (message.cores !== 0) { + writer.uint32(24).int64(message.cores); + } + if (message.memory !== 0) { + writer.uint32(32).int64(message.memory); + } + if (message.type !== 0) { + writer.uint32(40).int32(message.type); + } + if (message.minHostCount !== 0) { + writer.uint32(48).int64(message.minHostCount); + } + if (message.maxHostCount !== 0) { + writer.uint32(56).int64(message.maxHostCount); + } + if (message.hostCountDivider !== 0) { + writer.uint32(64).int64(message.hostCountDivider); + } + if (message.maxSegmentInHostCount !== 0) { + writer.uint32(72).int64(message.maxSegmentInHostCount); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ResourcePreset { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseResourcePreset } as ResourcePreset; + message.zoneIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.zoneIds.push(reader.string()); + break; + case 3: + message.cores = longToNumber(reader.int64() as Long); + break; + case 4: + message.memory = longToNumber(reader.int64() as Long); + break; + case 5: + message.type = reader.int32() as any; + break; + case 6: + message.minHostCount = longToNumber(reader.int64() as Long); + break; + case 7: + message.maxHostCount = longToNumber(reader.int64() as Long); + break; + case 8: + message.hostCountDivider = longToNumber(reader.int64() as Long); + break; + case 9: + message.maxSegmentInHostCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ResourcePreset { + const message = { ...baseResourcePreset } as ResourcePreset; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.cores = + object.cores !== undefined && object.cores !== null + ? 
Number(object.cores) + : 0; + message.memory = + object.memory !== undefined && object.memory !== null + ? Number(object.memory) + : 0; + message.type = + object.type !== undefined && object.type !== null + ? resourcePreset_TypeFromJSON(object.type) + : 0; + message.minHostCount = + object.minHostCount !== undefined && object.minHostCount !== null + ? Number(object.minHostCount) + : 0; + message.maxHostCount = + object.maxHostCount !== undefined && object.maxHostCount !== null + ? Number(object.maxHostCount) + : 0; + message.hostCountDivider = + object.hostCountDivider !== undefined && object.hostCountDivider !== null + ? Number(object.hostCountDivider) + : 0; + message.maxSegmentInHostCount = + object.maxSegmentInHostCount !== undefined && + object.maxSegmentInHostCount !== null + ? Number(object.maxSegmentInHostCount) + : 0; + return message; + }, + + toJSON(message: ResourcePreset): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + message.cores !== undefined && (obj.cores = Math.round(message.cores)); + message.memory !== undefined && (obj.memory = Math.round(message.memory)); + message.type !== undefined && + (obj.type = resourcePreset_TypeToJSON(message.type)); + message.minHostCount !== undefined && + (obj.minHostCount = Math.round(message.minHostCount)); + message.maxHostCount !== undefined && + (obj.maxHostCount = Math.round(message.maxHostCount)); + message.hostCountDivider !== undefined && + (obj.hostCountDivider = Math.round(message.hostCountDivider)); + message.maxSegmentInHostCount !== undefined && + (obj.maxSegmentInHostCount = Math.round(message.maxSegmentInHostCount)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ResourcePreset { + const message = { ...baseResourcePreset } as ResourcePreset; + message.id = object.id ?? ""; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.cores = object.cores ?? 0; + message.memory = object.memory ?? 0; + message.type = object.type ?? 0; + message.minHostCount = object.minHostCount ?? 0; + message.maxHostCount = object.maxHostCount ?? 0; + message.hostCountDivider = object.hostCountDivider ?? 0; + message.maxSegmentInHostCount = object.maxSegmentInHostCount ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ResourcePreset.$type, ResourcePreset); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts new file mode 100644 index 00000000..ee362f83 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts @@ -0,0 +1,475 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { ResourcePreset } from "../../../../../yandex/cloud/mdb/greenplum/v1/resource_preset"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface GetResourcePresetRequest { + $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest"; + /** + * Required. ID of the resource preset to return. + * To get the resource preset ID, use a [ResourcePresetService.List] request. + */ + resourcePresetId: string; +} + +export interface ListResourcePresetsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest"; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] + * returned by a previous list request. + */ + pageToken: string; +} + +export interface ListResourcePresetsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsResponse"; + /** List of resource presets. */ + resourcePresets: ResourcePreset[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent + * list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetResourcePresetRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest", + resourcePresetId: "", +}; + +export const GetResourcePresetRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest" as const, + + encode( + message: GetResourcePresetRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourcePresetId !== "") { + writer.uint32(10).string(message.resourcePresetId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetResourcePresetRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresetId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetResourcePresetRequest { + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + message.resourcePresetId = + object.resourcePresetId !== undefined && object.resourcePresetId !== null + ? String(object.resourcePresetId) + : ""; + return message; + }, + + toJSON(message: GetResourcePresetRequest): unknown { + const obj: any = {}; + message.resourcePresetId !== undefined && + (obj.resourcePresetId = message.resourcePresetId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetResourcePresetRequest { + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + message.resourcePresetId = object.resourcePresetId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GetResourcePresetRequest.$type, + GetResourcePresetRequest +); + +const baseListResourcePresetsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest", + pageSize: 0, + pageToken: "", +}; + +export const ListResourcePresetsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest" as const, + + encode( + message: ListResourcePresetsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcePresetsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcePresetsRequest { + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcePresetsRequest): unknown { + const obj: any = {}; + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcePresetsRequest { + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
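// Illustrative sketch of the codec pattern shared by the request messages above:
// encode() produces protobuf wire format and decode() reads it back. The relative
// import path and the preset ID are assumptions for illustration only.
import { GetResourcePresetRequest } from "./resource_preset_service";

const request = GetResourcePresetRequest.fromPartial({
  resourcePresetId: "s2.micro", // hypothetical ID
});

const bytes = GetResourcePresetRequest.encode(request).finish();
const decoded = GetResourcePresetRequest.decode(bytes);
console.log(decoded.resourcePresetId); // "s2.micro"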
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListResourcePresetsRequest.$type, + ListResourcePresetsRequest +); + +const baseListResourcePresetsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsResponse", + nextPageToken: "", +}; + +export const ListResourcePresetsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsResponse" as const, + + encode( + message: ListResourcePresetsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.resourcePresets) { + ResourcePreset.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcePresetsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresets.push( + ResourcePreset.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcePresetsResponse { + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = (object.resourcePresets ?? []).map((e: any) => + ResourcePreset.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcePresetsResponse): unknown { + const obj: any = {}; + if (message.resourcePresets) { + obj.resourcePresets = message.resourcePresets.map((e) => + e ? ResourcePreset.toJSON(e) : undefined + ); + } else { + obj.resourcePresets = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcePresetsResponse { + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = + object.resourcePresets?.map((e) => ResourcePreset.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListResourcePresetsResponse.$type, + ListResourcePresetsResponse +); + +/** A set of methods for managing resource presets. */ +export const ResourcePresetServiceService = { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.greenplum.v1.ResourcePresetService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetResourcePresetRequest) => + Buffer.from(GetResourcePresetRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetResourcePresetRequest.decode(value), + responseSerialize: (value: ResourcePreset) => + Buffer.from(ResourcePreset.encode(value).finish()), + responseDeserialize: (value: Buffer) => ResourcePreset.decode(value), + }, + /** Retrieves the list of available resource presets. 
*/ + list: { + path: "/yandex.cloud.mdb.greenplum.v1.ResourcePresetService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListResourcePresetsRequest) => + Buffer.from(ListResourcePresetsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListResourcePresetsRequest.decode(value), + responseSerialize: (value: ListResourcePresetsResponse) => + Buffer.from(ListResourcePresetsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListResourcePresetsResponse.decode(value), + }, +} as const; + +export interface ResourcePresetServiceServer + extends UntypedServiceImplementation { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of available resource presets. */ + list: handleUnaryCall< + ListResourcePresetsRequest, + ListResourcePresetsResponse + >; +} + +export interface ResourcePresetServiceClient extends Client { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get( + request: GetResourcePresetRequest, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + get( + request: GetResourcePresetRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + get( + request: GetResourcePresetRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + /** Retrieves the list of available resource presets. */ + list( + request: ListResourcePresetsRequest, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcePresetsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcePresetsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; +} + +export const ResourcePresetServiceClient = makeGenericClientConstructor( + ResourcePresetServiceService, + "yandex.cloud.mdb.greenplum.v1.ResourcePresetService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ResourcePresetServiceClient; + service: typeof ResourcePresetServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
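// Illustrative sketch of paging through resource presets with the generated
// ResourcePresetServiceClient, following the next_page_token contract documented in
// ListResourcePresetsRequest/Response above. The endpoint, IAM token, and relative
// import path are placeholders and assumptions, not values taken from this patch.
import { ChannelCredentials, Metadata } from "@grpc/grpc-js";
import {
  ResourcePresetServiceClient,
  ListResourcePresetsRequest,
  ListResourcePresetsResponse,
} from "./resource_preset_service";

const client = new ResourcePresetServiceClient(
  "mdb.api.cloud.yandex.net:443", // assumed endpoint, replace with the real one
  ChannelCredentials.createSsl()
);

const metadata = new Metadata();
metadata.set("authorization", "Bearer <IAM_TOKEN>"); // placeholder token

function listPage(pageToken: string): Promise<ListResourcePresetsResponse> {
  return new Promise<ListResourcePresetsResponse>((resolve, reject) => {
    client.list(
      ListResourcePresetsRequest.fromPartial({ pageSize: 100, pageToken }),
      metadata,
      (error, response) => (error ? reject(error) : resolve(response))
    );
  });
}

// Follow next_page_token until the service returns an empty token.
async function listAllPresets(): Promise<void> {
  let pageToken = "";
  do {
    const page = await listPage(pageToken);
    page.resourcePresets.forEach((preset) => console.log(preset.id));
    pageToken = page.nextPageToken;
  } while (pageToken !== "");
}

listAllPresets().catch(console.error);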
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/index.ts b/src/generated/yandex/cloud/mdb/index.ts index 90587677..0144703a 100644 --- a/src/generated/yandex/cloud/mdb/index.ts +++ b/src/generated/yandex/cloud/mdb/index.ts @@ -17,18 +17,26 @@ export * as clickhouse_version from './clickhouse/v1/version' export * as clickhouse_versions_service from './clickhouse/v1/versions_service' export * as elasticsearch_auth from './elasticsearch/v1/auth' export * as elasticsearch_auth_service from './elasticsearch/v1/auth_service' +export * as elasticsearch_backup from './elasticsearch/v1/backup' +export * as elasticsearch_backup_service from './elasticsearch/v1/backup_service' export * as elasticsearch_cluster from './elasticsearch/v1/cluster' export * as elasticsearch_cluster_service from './elasticsearch/v1/cluster_service' +export * as elasticsearch_extension from './elasticsearch/v1/extension' +export * as elasticsearch_extension_service from './elasticsearch/v1/extension_service' export * as elasticsearch_maintenance from './elasticsearch/v1/maintenance' export * as elasticsearch_resource_preset from './elasticsearch/v1/resource_preset' export * as elasticsearch_resource_preset_service from './elasticsearch/v1/resource_preset_service' export * as elasticsearch_user from './elasticsearch/v1/user' export * as elasticsearch_user_service from './elasticsearch/v1/user_service' +export * as greenplum_backup from './greenplum/v1/backup' +export * as greenplum_backup_service from './greenplum/v1/backup_service' export * as greenplum_cluster from './greenplum/v1/cluster' export * as greenplum_cluster_service from './greenplum/v1/cluster_service' export * as greenplum_config from './greenplum/v1/config' export * as greenplum_host from './greenplum/v1/host' export * as greenplum_maintenance from './greenplum/v1/maintenance' +export * as greenplum_resource_preset from './greenplum/v1/resource_preset' +export * as greenplum_resource_preset_service from './greenplum/v1/resource_preset_service' export * as kafka_cluster from './kafka/v1/cluster' export * as kafka_cluster_service from './kafka/v1/cluster_service' export * as kafka_common from './kafka/v1/common' @@ -97,7 +105,9 @@ export * as mongodb_mongodb3_6 from './mongodb/v1/config/mongodb3_6' export * as mongodb_mongodb4_0 from './mongodb/v1/config/mongodb4_0' export * as mongodb_mongodb4_2 from './mongodb/v1/config/mongodb4_2' export * as mongodb_mongodb4_4 from './mongodb/v1/config/mongodb4_4' +export * as mongodb_mongodb4_4_enterprise from './mongodb/v1/config/mongodb4_4_enterprise' export * as mongodb_mongodb5_0 from './mongodb/v1/config/mongodb5_0' +export * as mongodb_mongodb5_0_enterprise from './mongodb/v1/config/mongodb5_0_enterprise' export * as mysql_mysql5_7 from './mysql/v1/config/mysql5_7' export * as mysql_mysql8_0 from './mysql/v1/config/mysql8_0' export * as postgresql_host10 from './postgresql/v1/config/host10' @@ -107,6 +117,7 @@ export * as postgresql_host11_1c from './postgresql/v1/config/host11_1c' export * as postgresql_host12 from './postgresql/v1/config/host12' export * as postgresql_host12_1c from './postgresql/v1/config/host12_1c' export * as postgresql_host13 from 
'./postgresql/v1/config/host13' +export * as postgresql_host14 from './postgresql/v1/config/host14' export * as postgresql_host9_6 from './postgresql/v1/config/host9_6' export * as postgresql_postgresql10 from './postgresql/v1/config/postgresql10' export * as postgresql_postgresql10_1c from './postgresql/v1/config/postgresql10_1c' @@ -115,6 +126,7 @@ export * as postgresql_postgresql11_1c from './postgresql/v1/config/postgresql11 export * as postgresql_postgresql12 from './postgresql/v1/config/postgresql12' export * as postgresql_postgresql12_1c from './postgresql/v1/config/postgresql12_1c' export * as postgresql_postgresql13 from './postgresql/v1/config/postgresql13' +export * as postgresql_postgresql14 from './postgresql/v1/config/postgresql14' export * as postgresql_postgresql9_6 from './postgresql/v1/config/postgresql9_6' export * as redis_redis5_0 from './redis/v1/config/redis5_0' export * as redis_redis6_0 from './redis/v1/config/redis6_0' diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts index 8d888eb7..acb38a43 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts @@ -269,6 +269,8 @@ export interface ConfigSpec { unmanagedTopics: boolean; /** Enables managed schema registry on cluster */ schemaRegistry: boolean; + /** Access policy for external services. */ + access?: Access; } export interface ConfigSpec_Kafka { @@ -293,7 +295,7 @@ export interface Resources { * All available presets are listed in the [documentation](/docs/managed-kafka/concepts/instance-types). */ resourcePresetId: string; - /** Volume of the storage available to a host, in bytes. */ + /** Volume of the storage available to a host, in bytes. Must be greater than 2 * partition segment size in bytes * partitions count, so each partition can have one active segment file and one closed segment file that can be deleted. */ diskSize: number; /** Type of the storage environment for the host. */ diskTypeId: string; @@ -622,6 +624,12 @@ export function host_HealthToJSON(object: Host_Health): string { } } +export interface Access { + $type: "yandex.cloud.mdb.kafka.v1.Access"; + /** Allow access for DataTransfer. */ + dataTransfer: boolean; +} + const baseCluster: object = { $type: "yandex.cloud.mdb.kafka.v1.Cluster", id: "", @@ -1174,6 +1182,9 @@ export const ConfigSpec = { if (message.schemaRegistry === true) { writer.uint32(64).bool(message.schemaRegistry); } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(74).fork()).ldelim(); + } return writer; }, @@ -1215,6 +1226,9 @@ export const ConfigSpec = { case 8: message.schemaRegistry = reader.bool(); break; + case 9: + message.access = Access.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1254,6 +1268,10 @@ export const ConfigSpec = { object.schemaRegistry !== undefined && object.schemaRegistry !== null ? Boolean(object.schemaRegistry) : false; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; return message; }, @@ -1281,6 +1299,8 @@ export const ConfigSpec = { (obj.unmanagedTopics = message.unmanagedTopics); message.schemaRegistry !== undefined && (obj.schemaRegistry = message.schemaRegistry); + message.access !== undefined && + (obj.access = message.access ? 
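// Illustrative sketch of the new Kafka ConfigSpec.access field introduced above: it is a
// nested Access message with a single dataTransfer flag, and fromPartial() accepts a plain
// nested object for it. The relative import path is an assumption for illustration only.
import { ConfigSpec } from "./cluster";

const spec = ConfigSpec.fromPartial({
  schemaRegistry: true,
  access: { dataTransfer: true }, // allow access for DataTransfer
});

console.log(spec.access);
// { $type: "yandex.cloud.mdb.kafka.v1.Access", dataTransfer: true }
console.log(ConfigSpec.toJSON(spec)); // access is serialized as { dataTransfer: true }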
Access.toJSON(message.access) : undefined); return obj; }, @@ -1302,6 +1322,10 @@ export const ConfigSpec = { message.assignPublicIp = object.assignPublicIp ?? false; message.unmanagedTopics = object.unmanagedTopics ?? false; message.schemaRegistry = object.schemaRegistry ?? false; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromPartial(object.access) + : undefined; return message; }, }; @@ -2881,6 +2905,67 @@ export const Host = { messageTypeRegistry.set(Host.$type, Host); +const baseAccess: object = { + $type: "yandex.cloud.mdb.kafka.v1.Access", + dataTransfer: false, +}; + +export const Access = { + $type: "yandex.cloud.mdb.kafka.v1.Access" as const, + + encode( + message: Access, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dataTransfer === true) { + writer.uint32(8).bool(message.dataTransfer); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Access { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAccess } as Access; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataTransfer = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Access { + const message = { ...baseAccess } as Access; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; + return message; + }, + + toJSON(message: Access): unknown { + const obj: any = {}; + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); + return obj; + }, + + fromPartial, I>>(object: I): Access { + const message = { ...baseAccess } as Access; + message.dataTransfer = object.dataTransfer ?? false; + return message; + }, +}; + +messageTypeRegistry.set(Access.$type, Access); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts index 10980586..e889a4fa 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts @@ -27,11 +27,21 @@ import { Mongocfgconfigset44, Mongosconfigset44, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4"; +import { + Mongodconfigset44Enterprise, + Mongocfgconfigset44Enterprise, + Mongosconfigset44Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise"; import { Mongodconfigset50, Mongocfgconfigset50, Mongosconfigset50, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0"; +import { + Mongodconfigset50Enterprise, + Mongocfgconfigset50Enterprise, + Mongosconfigset50Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Int64Value } from "../../../../../google/protobuf/wrappers"; @@ -82,7 +92,6 @@ export interface Cluster { deletionProtection: boolean; } -/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -272,7 +281,7 @@ export interface Monitoring { export interface ClusterConfig { $type: "yandex.cloud.mdb.mongodb.v1.ClusterConfig"; - /** Version of MongoDB server software. 
Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `5.0`. */ + /** Version of MongoDB server software. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`. */ version: string; /** * MongoDB feature compatibility version. See usage details in [MongoDB documentation](https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/). @@ -295,6 +304,10 @@ export interface ClusterConfig { mongodb44?: Mongodb44 | undefined; /** Configuration and resource allocation for a MongoDB 5.0 cluster. */ mongodb50?: Mongodb50 | undefined; + /** Configuration and resource allocation for a MongoDB 4.4 Enterprise cluster. */ + mongodb44Enterprise?: Mongodb44Enterprise | undefined; + /** Configuration and resource allocation for a MongoDB 5.0 Enterprise cluster. */ + mongodb50Enterprise?: Mongodb50Enterprise | undefined; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Retain period of automatically created backup in days */ @@ -477,6 +490,50 @@ export interface Mongodb44_MongoInfra { resources?: Resources; } +export interface Mongodb44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise"; + /** Configuration and resource allocation for mongod in a MongoDB 4.4 cluster. */ + mongod?: Mongodb44Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg in a MongoDB 4.4 cluster. */ + mongocfg?: Mongodb44Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos in a MongoDB 4.4 cluster. */ + mongos?: Mongodb44Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) in a MongoDB 4.4 cluster. */ + mongoinfra?: Mongodb44Enterprise_MongoInfra; +} + +export interface Mongodb44Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongod"; + /** Configuration for mongod 4.4 hosts. */ + config?: Mongodconfigset44Enterprise; + /** Resources allocated to mongod hosts. */ + resources?: Resources; +} + +export interface Mongodb44Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoCfg"; + /** Configuration for mongocfg 4.4 hosts. */ + config?: Mongocfgconfigset44Enterprise; + /** Resources allocated to mongocfg hosts. */ + resources?: Resources; +} + +export interface Mongodb44Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongos"; + /** Configuration for mongos 4.4 hosts. */ + config?: Mongosconfigset44Enterprise; + /** Resources allocated to mongos hosts. */ + resources?: Resources; +} + +export interface Mongodb44Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoInfra"; + configMongos?: Mongosconfigset44Enterprise; + configMongocfg?: Mongocfgconfigset44Enterprise; + /** Resources allocated to mongoinfra (mongos+mongocfg) hosts. */ + resources?: Resources; +} + export interface Mongodb50 { $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0"; /** Configuration and resource allocation for mongod in a MongoDB 5.0 cluster. */ @@ -521,6 +578,50 @@ export interface Mongodb50_MongoInfra { resources?: Resources; } +export interface Mongodb50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise"; + /** Configuration and resource allocation for mongod in a MongoDB 5.0 cluster. */ + mongod?: Mongodb50Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg in a MongoDB 5.0 cluster. 
*/ + mongocfg?: Mongodb50Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos in a MongoDB 5.0 cluster. */ + mongos?: Mongodb50Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) in a MongoDB 5.0 cluster. */ + mongoinfra?: Mongodb50Enterprise_MongoInfra; +} + +export interface Mongodb50Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongod"; + /** Configuration for mongod 5.0 hosts. */ + config?: Mongodconfigset50Enterprise; + /** Resources allocated to mongod hosts. */ + resources?: Resources; +} + +export interface Mongodb50Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoCfg"; + /** Configuration for mongocfg 5.0 hosts. */ + config?: Mongocfgconfigset50Enterprise; + /** Resources allocated to mongocfg hosts. */ + resources?: Resources; +} + +export interface Mongodb50Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongos"; + /** Configuration for mongos 5.0 hosts. */ + config?: Mongosconfigset50Enterprise; + /** Resources allocated to mongos hosts. */ + resources?: Resources; +} + +export interface Mongodb50Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoInfra"; + configMongos?: Mongosconfigset50Enterprise; + configMongocfg?: Mongocfgconfigset50Enterprise; + /** Resources allocated to mongoinfra (mongos+mongocfg) hosts. */ + resources?: Resources; +} + export interface Shard { $type: "yandex.cloud.mdb.mongodb.v1.Shard"; /** Name of the shard. */ @@ -535,7 +636,7 @@ export interface Host { * Name of the MongoDB host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the MongoDB host. The ID is assigned by MDB at creation time. */ @@ -822,6 +923,8 @@ export interface Access { $type: "yandex.cloud.mdb.mongodb.v1.Access"; /** Allow access for DataLens */ dataLens: boolean; + /** Allow access for DataTransfer. */ + dataTransfer: boolean; } const baseCluster: object = { @@ -1360,6 +1463,18 @@ export const ClusterConfig = { if (message.mongodb50 !== undefined) { Mongodb50.encode(message.mongodb50, writer.uint32(82).fork()).ldelim(); } + if (message.mongodb44Enterprise !== undefined) { + Mongodb44Enterprise.encode( + message.mongodb44Enterprise, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.mongodb50Enterprise !== undefined) { + Mongodb50Enterprise.encode( + message.mongodb50Enterprise, + writer.uint32(98).fork() + ).ldelim(); + } if (message.backupWindowStart !== undefined) { TimeOfDay.encode( message.backupWindowStart, @@ -1409,6 +1524,18 @@ export const ClusterConfig = { case 10: message.mongodb50 = Mongodb50.decode(reader, reader.uint32()); break; + case 11: + message.mongodb44Enterprise = Mongodb44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.mongodb50Enterprise = Mongodb50Enterprise.decode( + reader, + reader.uint32() + ); + break; case 3: message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); break; @@ -1460,6 +1587,16 @@ export const ClusterConfig = { object.mongodb_5_0 !== undefined && object.mongodb_5_0 !== null ? 
Mongodb50.fromJSON(object.mongodb_5_0) : undefined; + message.mongodb44Enterprise = + object.mongodb_4_4_enterprise !== undefined && + object.mongodb_4_4_enterprise !== null + ? Mongodb44Enterprise.fromJSON(object.mongodb_4_4_enterprise) + : undefined; + message.mongodb50Enterprise = + object.mongodb_5_0_enterprise !== undefined && + object.mongodb_5_0_enterprise !== null + ? Mongodb50Enterprise.fromJSON(object.mongodb_5_0_enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -1502,6 +1639,14 @@ export const ClusterConfig = { (obj.mongodb_5_0 = message.mongodb50 ? Mongodb50.toJSON(message.mongodb50) : undefined); + message.mongodb44Enterprise !== undefined && + (obj.mongodb_4_4_enterprise = message.mongodb44Enterprise + ? Mongodb44Enterprise.toJSON(message.mongodb44Enterprise) + : undefined); + message.mongodb50Enterprise !== undefined && + (obj.mongodb_5_0_enterprise = message.mongodb50Enterprise + ? Mongodb50Enterprise.toJSON(message.mongodb50Enterprise) + : undefined); message.backupWindowStart !== undefined && (obj.backupWindowStart = message.backupWindowStart ? TimeOfDay.toJSON(message.backupWindowStart) @@ -1540,6 +1685,16 @@ export const ClusterConfig = { object.mongodb50 !== undefined && object.mongodb50 !== null ? Mongodb50.fromPartial(object.mongodb50) : undefined; + message.mongodb44Enterprise = + object.mongodb44Enterprise !== undefined && + object.mongodb44Enterprise !== null + ? Mongodb44Enterprise.fromPartial(object.mongodb44Enterprise) + : undefined; + message.mongodb50Enterprise = + object.mongodb50Enterprise !== undefined && + object.mongodb50Enterprise !== null + ? Mongodb50Enterprise.fromPartial(object.mongodb50Enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -3628,37 +3783,37 @@ export const Mongodb44_MongoInfra = { messageTypeRegistry.set(Mongodb44_MongoInfra.$type, Mongodb44_MongoInfra); -const baseMongodb50: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0", +const baseMongodb44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise", }; -export const Mongodb50 = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0" as const, +export const Mongodb44Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise" as const, encode( - message: Mongodb50, + message: Mongodb44Enterprise, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.mongod !== undefined) { - Mongodb50_Mongod.encode( + Mongodb44Enterprise_Mongod.encode( message.mongod, writer.uint32(10).fork() ).ldelim(); } if (message.mongocfg !== undefined) { - Mongodb50_MongoCfg.encode( + Mongodb44Enterprise_MongoCfg.encode( message.mongocfg, writer.uint32(18).fork() ).ldelim(); } if (message.mongos !== undefined) { - Mongodb50_Mongos.encode( + Mongodb44Enterprise_Mongos.encode( message.mongos, writer.uint32(26).fork() ).ldelim(); } if (message.mongoinfra !== undefined) { - Mongodb50_MongoInfra.encode( + Mongodb44Enterprise_MongoInfra.encode( message.mongoinfra, writer.uint32(34).fork() ).ldelim(); @@ -3666,24 +3821,33 @@ export const Mongodb50 = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50 { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb44Enterprise { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
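// Illustrative sketch of the new MongoDB enterprise editions on ClusterConfig. Note the
// field-name mapping shown in fromJSON/toJSON above: the TypeScript properties are
// camelCase (mongodb50Enterprise) while the JSON representation uses the proto names
// (mongodb_5_0_enterprise). The relative import path is an assumption for illustration only.
import { ClusterConfig } from "./cluster";

const config = ClusterConfig.fromJSON({
  version: "5.0-enterprise",
  mongodb_5_0_enterprise: { mongod: {} },
});

console.log(config.version);                     // "5.0-enterprise"
console.log(config.mongodb50Enterprise?.mongod); // parsed Mongodb50Enterprise_Mongod message
console.log(ClusterConfig.toJSON(config));       // round-trips back to mongodb_5_0_enterprise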
reader.len : reader.pos + length; - const message = { ...baseMongodb50 } as Mongodb50; + const message = { ...baseMongodb44Enterprise } as Mongodb44Enterprise; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.mongod = Mongodb50_Mongod.decode(reader, reader.uint32()); + message.mongod = Mongodb44Enterprise_Mongod.decode( + reader, + reader.uint32() + ); break; case 2: - message.mongocfg = Mongodb50_MongoCfg.decode(reader, reader.uint32()); + message.mongocfg = Mongodb44Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); break; case 3: - message.mongos = Mongodb50_Mongos.decode(reader, reader.uint32()); + message.mongos = Mongodb44Enterprise_Mongos.decode( + reader, + reader.uint32() + ); break; case 4: - message.mongoinfra = Mongodb50_MongoInfra.decode( + message.mongoinfra = Mongodb44Enterprise_MongoInfra.decode( reader, reader.uint32() ); @@ -3696,87 +3860,87 @@ export const Mongodb50 = { return message; }, - fromJSON(object: any): Mongodb50 { - const message = { ...baseMongodb50 } as Mongodb50; + fromJSON(object: any): Mongodb44Enterprise { + const message = { ...baseMongodb44Enterprise } as Mongodb44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodb50_Mongod.fromJSON(object.mongod) + ? Mongodb44Enterprise_Mongod.fromJSON(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodb50_MongoCfg.fromJSON(object.mongocfg) + ? Mongodb44Enterprise_MongoCfg.fromJSON(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodb50_Mongos.fromJSON(object.mongos) + ? Mongodb44Enterprise_Mongos.fromJSON(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodb50_MongoInfra.fromJSON(object.mongoinfra) + ? Mongodb44Enterprise_MongoInfra.fromJSON(object.mongoinfra) : undefined; return message; }, - toJSON(message: Mongodb50): unknown { + toJSON(message: Mongodb44Enterprise): unknown { const obj: any = {}; message.mongod !== undefined && (obj.mongod = message.mongod - ? Mongodb50_Mongod.toJSON(message.mongod) + ? Mongodb44Enterprise_Mongod.toJSON(message.mongod) : undefined); message.mongocfg !== undefined && (obj.mongocfg = message.mongocfg - ? Mongodb50_MongoCfg.toJSON(message.mongocfg) + ? Mongodb44Enterprise_MongoCfg.toJSON(message.mongocfg) : undefined); message.mongos !== undefined && (obj.mongos = message.mongos - ? Mongodb50_Mongos.toJSON(message.mongos) + ? Mongodb44Enterprise_Mongos.toJSON(message.mongos) : undefined); message.mongoinfra !== undefined && (obj.mongoinfra = message.mongoinfra - ? Mongodb50_MongoInfra.toJSON(message.mongoinfra) + ? Mongodb44Enterprise_MongoInfra.toJSON(message.mongoinfra) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50 { - const message = { ...baseMongodb50 } as Mongodb50; + ): Mongodb44Enterprise { + const message = { ...baseMongodb44Enterprise } as Mongodb44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodb50_Mongod.fromPartial(object.mongod) + ? Mongodb44Enterprise_Mongod.fromPartial(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodb50_MongoCfg.fromPartial(object.mongocfg) + ? Mongodb44Enterprise_MongoCfg.fromPartial(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? 
Mongodb50_Mongos.fromPartial(object.mongos) + ? Mongodb44Enterprise_Mongos.fromPartial(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodb50_MongoInfra.fromPartial(object.mongoinfra) + ? Mongodb44Enterprise_MongoInfra.fromPartial(object.mongoinfra) : undefined; return message; }, }; -messageTypeRegistry.set(Mongodb50.$type, Mongodb50); +messageTypeRegistry.set(Mongodb44Enterprise.$type, Mongodb44Enterprise); -const baseMongodb50_Mongod: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod", +const baseMongodb44Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongod", }; -export const Mongodb50_Mongod = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod" as const, +export const Mongodb44Enterprise_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongod" as const, encode( - message: Mongodb50_Mongod, + message: Mongodb44Enterprise_Mongod, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongodconfigset50.encode( + Mongodconfigset44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -3787,15 +3951,23 @@ export const Mongodb50_Mongod = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongod { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb44Enterprise_Mongod { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + const message = { + ...baseMongodb44Enterprise_Mongod, + } as Mongodb44Enterprise_Mongod; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongodconfigset50.decode(reader, reader.uint32()); + message.config = Mongodconfigset44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -3808,11 +3980,13 @@ export const Mongodb50_Mongod = { return message; }, - fromJSON(object: any): Mongodb50_Mongod { - const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + fromJSON(object: any): Mongodb44Enterprise_Mongod { + const message = { + ...baseMongodb44Enterprise_Mongod, + } as Mongodb44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? Mongodconfigset50.fromJSON(object.config) + ? Mongodconfigset44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3821,11 +3995,11 @@ export const Mongodb50_Mongod = { return message; }, - toJSON(message: Mongodb50_Mongod): unknown { + toJSON(message: Mongodb44Enterprise_Mongod): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongodconfigset50.toJSON(message.config) + ? Mongodconfigset44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -3834,13 +4008,15 @@ export const Mongodb50_Mongod = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_Mongod { - const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + ): Mongodb44Enterprise_Mongod { + const message = { + ...baseMongodb44Enterprise_Mongod, + } as Mongodb44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? 
Mongodconfigset50.fromPartial(object.config) + ? Mongodconfigset44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3850,21 +4026,24 @@ export const Mongodb50_Mongod = { }, }; -messageTypeRegistry.set(Mongodb50_Mongod.$type, Mongodb50_Mongod); +messageTypeRegistry.set( + Mongodb44Enterprise_Mongod.$type, + Mongodb44Enterprise_Mongod +); -const baseMongodb50_MongoCfg: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg", +const baseMongodb44Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoCfg", }; -export const Mongodb50_MongoCfg = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg" as const, +export const Mongodb44Enterprise_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoCfg" as const, encode( - message: Mongodb50_MongoCfg, + message: Mongodb44Enterprise_MongoCfg, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongocfgconfigset50.encode( + Mongocfgconfigset44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -3875,15 +4054,23 @@ export const Mongodb50_MongoCfg = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_MongoCfg { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb44Enterprise_MongoCfg { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + const message = { + ...baseMongodb44Enterprise_MongoCfg, + } as Mongodb44Enterprise_MongoCfg; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongocfgconfigset50.decode(reader, reader.uint32()); + message.config = Mongocfgconfigset44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -3896,11 +4083,13 @@ export const Mongodb50_MongoCfg = { return message; }, - fromJSON(object: any): Mongodb50_MongoCfg { - const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + fromJSON(object: any): Mongodb44Enterprise_MongoCfg { + const message = { + ...baseMongodb44Enterprise_MongoCfg, + } as Mongodb44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfigset50.fromJSON(object.config) + ? Mongocfgconfigset44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3909,11 +4098,11 @@ export const Mongodb50_MongoCfg = { return message; }, - toJSON(message: Mongodb50_MongoCfg): unknown { + toJSON(message: Mongodb44Enterprise_MongoCfg): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongocfgconfigset50.toJSON(message.config) + ? 
Mongocfgconfigset44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -3922,13 +4111,15 @@ export const Mongodb50_MongoCfg = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_MongoCfg { - const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + ): Mongodb44Enterprise_MongoCfg { + const message = { + ...baseMongodb44Enterprise_MongoCfg, + } as Mongodb44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfigset50.fromPartial(object.config) + ? Mongocfgconfigset44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3938,21 +4129,24 @@ export const Mongodb50_MongoCfg = { }, }; -messageTypeRegistry.set(Mongodb50_MongoCfg.$type, Mongodb50_MongoCfg); +messageTypeRegistry.set( + Mongodb44Enterprise_MongoCfg.$type, + Mongodb44Enterprise_MongoCfg +); -const baseMongodb50_Mongos: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos", +const baseMongodb44Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongos", }; -export const Mongodb50_Mongos = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos" as const, +export const Mongodb44Enterprise_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongos" as const, encode( - message: Mongodb50_Mongos, + message: Mongodb44Enterprise_Mongos, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongosconfigset50.encode( + Mongosconfigset44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -3963,15 +4157,23 @@ export const Mongodb50_Mongos = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongos { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb44Enterprise_Mongos { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + const message = { + ...baseMongodb44Enterprise_Mongos, + } as Mongodb44Enterprise_Mongos; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongosconfigset50.decode(reader, reader.uint32()); + message.config = Mongosconfigset44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -3984,11 +4186,13 @@ export const Mongodb50_Mongos = { return message; }, - fromJSON(object: any): Mongodb50_Mongos { - const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + fromJSON(object: any): Mongodb44Enterprise_Mongos { + const message = { + ...baseMongodb44Enterprise_Mongos, + } as Mongodb44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfigset50.fromJSON(object.config) + ? Mongosconfigset44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3997,11 +4201,11 @@ export const Mongodb50_Mongos = { return message; }, - toJSON(message: Mongodb50_Mongos): unknown { + toJSON(message: Mongodb44Enterprise_Mongos): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongosconfigset50.toJSON(message.config) + ? 
Mongosconfigset44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -4010,13 +4214,15 @@ export const Mongodb50_Mongos = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_Mongos { - const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + ): Mongodb44Enterprise_Mongos { + const message = { + ...baseMongodb44Enterprise_Mongos, + } as Mongodb44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfigset50.fromPartial(object.config) + ? Mongosconfigset44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -4026,27 +4232,31 @@ export const Mongodb50_Mongos = { }, }; -messageTypeRegistry.set(Mongodb50_Mongos.$type, Mongodb50_Mongos); +messageTypeRegistry.set( + Mongodb44Enterprise_Mongos.$type, + Mongodb44Enterprise_Mongos +); -const baseMongodb50_MongoInfra: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra", +const baseMongodb44Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoInfra", }; -export const Mongodb50_MongoInfra = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra" as const, +export const Mongodb44Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoInfra" as const, encode( - message: Mongodb50_MongoInfra, + message: Mongodb44Enterprise_MongoInfra, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.configMongos !== undefined) { - Mongosconfigset50.encode( + Mongosconfigset44Enterprise.encode( message.configMongos, writer.uint32(10).fork() ).ldelim(); } if (message.configMongocfg !== undefined) { - Mongocfgconfigset50.encode( + Mongocfgconfigset44Enterprise.encode( message.configMongocfg, writer.uint32(18).fork() ).ldelim(); @@ -4060,21 +4270,23 @@ export const Mongodb50_MongoInfra = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodb50_MongoInfra { + ): Mongodb44Enterprise_MongoInfra { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + const message = { + ...baseMongodb44Enterprise_MongoInfra, + } as Mongodb44Enterprise_MongoInfra; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.configMongos = Mongosconfigset50.decode( + message.configMongos = Mongosconfigset44Enterprise.decode( reader, reader.uint32() ); break; case 2: - message.configMongocfg = Mongocfgconfigset50.decode( + message.configMongocfg = Mongocfgconfigset44Enterprise.decode( reader, reader.uint32() ); @@ -4090,15 +4302,17 @@ export const Mongodb50_MongoInfra = { return message; }, - fromJSON(object: any): Mongodb50_MongoInfra { - const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + fromJSON(object: any): Mongodb44Enterprise_MongoInfra { + const message = { + ...baseMongodb44Enterprise_MongoInfra, + } as Mongodb44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfigset50.fromJSON(object.configMongos) + ? Mongosconfigset44Enterprise.fromJSON(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? 
Mongocfgconfigset50.fromJSON(object.configMongocfg) + ? Mongocfgconfigset44Enterprise.fromJSON(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -4107,15 +4321,15 @@ export const Mongodb50_MongoInfra = { return message; }, - toJSON(message: Mongodb50_MongoInfra): unknown { + toJSON(message: Mongodb44Enterprise_MongoInfra): unknown { const obj: any = {}; message.configMongos !== undefined && (obj.configMongos = message.configMongos - ? Mongosconfigset50.toJSON(message.configMongos) + ? Mongosconfigset44Enterprise.toJSON(message.configMongos) : undefined); message.configMongocfg !== undefined && (obj.configMongocfg = message.configMongocfg - ? Mongocfgconfigset50.toJSON(message.configMongocfg) + ? Mongocfgconfigset44Enterprise.toJSON(message.configMongocfg) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -4124,17 +4338,19 @@ export const Mongodb50_MongoInfra = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_MongoInfra { - const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + ): Mongodb44Enterprise_MongoInfra { + const message = { + ...baseMongodb44Enterprise_MongoInfra, + } as Mongodb44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfigset50.fromPartial(object.configMongos) + ? Mongosconfigset44Enterprise.fromPartial(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfigset50.fromPartial(object.configMongocfg) + ? Mongocfgconfigset44Enterprise.fromPartial(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -4144,28 +4360,1131 @@ export const Mongodb50_MongoInfra = { }, }; -messageTypeRegistry.set(Mongodb50_MongoInfra.$type, Mongodb50_MongoInfra); +messageTypeRegistry.set( + Mongodb44Enterprise_MongoInfra.$type, + Mongodb44Enterprise_MongoInfra +); -const baseShard: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Shard", - name: "", - clusterId: "", +const baseMongodb50: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0", }; -export const Shard = { - $type: "yandex.cloud.mdb.mongodb.v1.Shard" as const, +export const Mongodb50 = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0" as const, - encode(message: Shard, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); + encode( + message: Mongodb50, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodb50_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); } - if (message.clusterId !== "") { - writer.uint32(18).string(message.clusterId); + if (message.mongocfg !== undefined) { + Mongodb50_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodb50_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodb50_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Shard { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb50 } as Mongodb50; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodb50_Mongod.decode(reader, reader.uint32()); + break; + case 2: + message.mongocfg = Mongodb50_MongoCfg.decode(reader, reader.uint32()); + break; + case 3: + message.mongos = Mongodb50_Mongos.decode(reader, reader.uint32()); + break; + case 4: + message.mongoinfra = Mongodb50_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50 { + const message = { ...baseMongodb50 } as Mongodb50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodb50): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodb50_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodb50_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodb50_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodb50_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50 { + const message = { ...baseMongodb50 } as Mongodb50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50.$type, Mongodb50); + +const baseMongodb50_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod", +}; + +export const Mongodb50_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod" as const, + + encode( + message: Mongodb50_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfigset50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
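// Illustrative sketch, not part of the generated patch: the decode loops in
// these hunks use standard protobuf tag arithmetic. Each key read from the
// wire packs the field number and wire type into one varint, so `tag >>> 3`
// recovers the field number and `tag & 7` the wire type. The literals passed
// to writer.uint32() on the encode side are the same keys precomputed, e.g.
// field 1 with wire type 2 (length-delimited) is (1 << 3) | 2 = 10, field 2
// is 18, field 3 is 26, field 4 is 34.
const tagFor = (fieldNumber: number, wireType: number): number =>
  (fieldNumber << 3) | wireType;
const fieldOf = (tag: number): number => tag >>> 3;
const wireTypeOf = (tag: number): number => tag & 7;

console.log(tagFor(1, 2), fieldOf(10), wireTypeOf(10)); // 10 1 2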
reader.len : reader.pos + length; + const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfigset50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_Mongod { + const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfigset50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_Mongod { + const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_Mongod.$type, Mongodb50_Mongod); + +const baseMongodb50_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg", +}; + +export const Mongodb50_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg" as const, + + encode( + message: Mongodb50_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfigset50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfigset50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_MongoCfg { + const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? 
Mongocfgconfigset50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_MongoCfg { + const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_MongoCfg.$type, Mongodb50_MongoCfg); + +const baseMongodb50_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos", +}; + +export const Mongodb50_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos" as const, + + encode( + message: Mongodb50_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfigset50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfigset50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_Mongos { + const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfigset50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_Mongos { + const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_Mongos.$type, Mongodb50_Mongos); + +const baseMongodb50_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra", +}; + +export const Mongodb50_MongoInfra = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra" as const, + + encode( + message: Mongodb50_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfigset50.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfigset50.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfigset50.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfigset50.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_MongoInfra { + const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfigset50.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfigset50.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_MongoInfra { + const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
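// Illustrative sketch, not part of the generated patch: every message
// constant in these hunks exposes the same ts-proto surface — encode/decode
// for the protobuf wire format, fromJSON/toJSON for the JSON mapping, and
// fromPartial for building a typed message from a plain object. A minimal
// round-trip using Mongodb50_Mongos; the import path and resource values are
// assumptions, and diskSize is omitted because its int64 representation
// depends on the ts-proto long settings.
import { Mongodb50_Mongos } from "./cluster"; // illustrative path

const mongos = Mongodb50_Mongos.fromPartial({
  resources: {
    resourcePresetId: "s2.micro",
    diskTypeId: "network-ssd",
  },
});

// Serialize to wire bytes, decode back, and emit the JSON form.
const bytes = Mongodb50_Mongos.encode(mongos).finish();
const decoded = Mongodb50_Mongos.decode(bytes);
console.log(Mongodb50_Mongos.toJSON(decoded));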
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_MongoInfra.$type, Mongodb50_MongoInfra); + +const baseMongodb50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise", +}; + +export const Mongodb50Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise" as const, + + encode( + message: Mongodb50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodb50Enterprise_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.mongocfg !== undefined) { + Mongodb50Enterprise_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodb50Enterprise_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodb50Enterprise_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb50Enterprise } as Mongodb50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodb50Enterprise_Mongod.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.mongocfg = Mongodb50Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodb50Enterprise_Mongos.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.mongoinfra = Mongodb50Enterprise_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise { + const message = { ...baseMongodb50Enterprise } as Mongodb50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50Enterprise_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50Enterprise_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50Enterprise_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50Enterprise_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodb50Enterprise_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodb50Enterprise_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodb50Enterprise_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? 
Mongodb50Enterprise_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise { + const message = { ...baseMongodb50Enterprise } as Mongodb50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50Enterprise_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50Enterprise_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50Enterprise_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50Enterprise_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50Enterprise.$type, Mongodb50Enterprise); + +const baseMongodb50Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongod", +}; + +export const Mongodb50Enterprise_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongod" as const, + + encode( + message: Mongodb50Enterprise_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfigset50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_Mongod, + } as Mongodb50Enterprise_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_Mongod { + const message = { + ...baseMongodb50Enterprise_Mongod, + } as Mongodb50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfigset50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_Mongod { + const message = { + ...baseMongodb50Enterprise_Mongod, + } as Mongodb50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
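// Illustrative sketch, not part of the generated patch: the repeated
// messageTypeRegistry.set($type, codec) calls build a runtime map from
// fully-qualified type names to their codecs. One possible use, assuming the
// registry is the Map exported by the generated typeRegistry module (import
// path illustrative), is decoding a payload when only its type name is known
// at runtime.
import { messageTypeRegistry } from "../../../../../typeRegistry";

function decodeByTypeName(typeName: string, payload: Uint8Array): unknown {
  const codec = messageTypeRegistry.get(typeName);
  if (!codec) {
    throw new Error(`no generated codec registered for ${typeName}`);
  }
  return codec.decode(payload);
}

// e.g. decodeByTypeName("yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise", bytes)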
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_Mongod.$type, + Mongodb50Enterprise_Mongod +); + +const baseMongodb50Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoCfg", +}; + +export const Mongodb50Enterprise_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoCfg" as const, + + encode( + message: Mongodb50Enterprise_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfigset50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_MongoCfg, + } as Mongodb50Enterprise_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_MongoCfg { + const message = { + ...baseMongodb50Enterprise_MongoCfg, + } as Mongodb50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfigset50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_MongoCfg { + const message = { + ...baseMongodb50Enterprise_MongoCfg, + } as Mongodb50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_MongoCfg.$type, + Mongodb50Enterprise_MongoCfg +); + +const baseMongodb50Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongos", +}; + +export const Mongodb50Enterprise_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongos" as const, + + encode( + message: Mongodb50Enterprise_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfigset50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_Mongos, + } as Mongodb50Enterprise_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_Mongos { + const message = { + ...baseMongodb50Enterprise_Mongos, + } as Mongodb50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfigset50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_Mongos { + const message = { + ...baseMongodb50Enterprise_Mongos, + } as Mongodb50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_Mongos.$type, + Mongodb50Enterprise_Mongos +); + +const baseMongodb50Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoInfra", +}; + +export const Mongodb50Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoInfra" as const, + + encode( + message: Mongodb50Enterprise_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfigset50Enterprise.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfigset50Enterprise.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_MongoInfra, + } as Mongodb50Enterprise_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_MongoInfra { + const message = { + ...baseMongodb50Enterprise_MongoInfra, + } as Mongodb50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50Enterprise.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50Enterprise.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfigset50Enterprise.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfigset50Enterprise.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_MongoInfra { + const message = { + ...baseMongodb50Enterprise_MongoInfra, + } as Mongodb50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50Enterprise.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? 
Mongocfgconfigset50Enterprise.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_MongoInfra.$type, + Mongodb50Enterprise_MongoInfra +); + +const baseShard: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Shard", + name: "", + clusterId: "", +}; + +export const Shard = { + $type: "yandex.cloud.mdb.mongodb.v1.Shard" as const, + + encode(message: Shard, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Shard { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseShard } as Shard; @@ -4583,6 +5902,7 @@ messageTypeRegistry.set(Resources.$type, Resources); const baseAccess: object = { $type: "yandex.cloud.mdb.mongodb.v1.Access", dataLens: false, + dataTransfer: false, }; export const Access = { @@ -4595,6 +5915,9 @@ export const Access = { if (message.dataLens === true) { writer.uint32(8).bool(message.dataLens); } + if (message.dataTransfer === true) { + writer.uint32(24).bool(message.dataTransfer); + } return writer; }, @@ -4608,6 +5931,9 @@ export const Access = { case 1: message.dataLens = reader.bool(); break; + case 3: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -4622,18 +5948,25 @@ export const Access = { object.dataLens !== undefined && object.dataLens !== null ? Boolean(object.dataLens) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, toJSON(message: Access): unknown { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, fromPartial, I>>(object: I): Access { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; + message.dataTransfer = object.dataTransfer ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts index 7382a19c..ee25b5a5 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts @@ -51,11 +51,21 @@ import { Mongocfgconfig44, Mongosconfig44, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4"; +import { + Mongodconfig44Enterprise, + Mongocfgconfig44Enterprise, + Mongosconfig44Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise"; import { Mongodconfig50, Mongocfgconfig50, Mongosconfig50, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0"; +import { + Mongodconfig50Enterprise, + Mongocfgconfig50Enterprise, + Mongosconfig50Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise"; import { TimeOfDay } from "../../../../../google/type/timeofday"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { DatabaseSpec } from "../../../../../yandex/cloud/mdb/mongodb/v1/database"; @@ -440,6 +450,8 @@ export enum ListClusterLogsRequest_ServiceType { MONGOD = 1, MONGOS = 2, MONGOCFG = 3, + /** AUDIT - MongoDB Enterprise audit logs */ + AUDIT = 4, UNRECOGNIZED = -1, } @@ -459,6 +471,9 @@ export function listClusterLogsRequest_ServiceTypeFromJSON( case 3: case "MONGOCFG": return ListClusterLogsRequest_ServiceType.MONGOCFG; + case 4: + case "AUDIT": + return ListClusterLogsRequest_ServiceType.AUDIT; case -1: case "UNRECOGNIZED": default: @@ -478,6 +493,8 @@ export function listClusterLogsRequest_ServiceTypeToJSON( return "MONGOS"; case ListClusterLogsRequest_ServiceType.MONGOCFG: return "MONGOCFG"; + case ListClusterLogsRequest_ServiceType.AUDIT: + return "AUDIT"; default: return "UNKNOWN"; } @@ -547,6 +564,8 @@ export enum StreamClusterLogsRequest_ServiceType { MONGOD = 1, MONGOS = 2, MONGOCFG = 3, + /** AUDIT - MongoDB Enterprise audit logs */ + AUDIT = 4, UNRECOGNIZED = -1, } @@ -566,6 +585,9 @@ export function streamClusterLogsRequest_ServiceTypeFromJSON( case 3: case "MONGOCFG": return StreamClusterLogsRequest_ServiceType.MONGOCFG; + case 4: + case "AUDIT": + return StreamClusterLogsRequest_ServiceType.AUDIT; case -1: case "UNRECOGNIZED": default: @@ -585,6 +607,8 @@ export function streamClusterLogsRequest_ServiceTypeToJSON( return "MONGOS"; case StreamClusterLogsRequest_ServiceType.MONGOCFG: return "MONGOCFG"; + case StreamClusterLogsRequest_ServiceType.AUDIT: + return "AUDIT"; default: return "UNKNOWN"; } @@ -1095,6 +1119,51 @@ export interface Mongodbspec44_MongoInfra { resources?: Resources; } +export interface Mongodbspec44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise"; + /** Configuration and resource allocation for mongod 4.4 hosts. */ + mongod?: Mongodbspec44Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg 4.4 hosts. */ + mongocfg?: Mongodbspec44Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos 4.4 hosts. */ + mongos?: Mongodbspec44Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) 4.4 hosts. */ + mongoinfra?: Mongodbspec44Enterprise_MongoInfra; +} + +export interface Mongodbspec44Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongod"; + /** Configuration for mongod 4.4 hosts. 
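// Illustrative sketch, not part of the generated patch: two functional
// additions surface in this hunk — the Access message gains a dataTransfer
// flag (wire field 3, alongside the existing dataLens flag), and both log
// ServiceType enums gain AUDIT = 4 for MongoDB Enterprise audit logs.
// Request field names such as clusterId and serviceType are assumptions
// about the log-listing request, and import paths are illustrative.
import { Access } from "./cluster";
import {
  ListClusterLogsRequest,
  ListClusterLogsRequest_ServiceType,
} from "./cluster_service";

// Enable the new Data Transfer access flag next to DataLens.
const access = Access.fromPartial({ dataLens: true, dataTransfer: true });

// Request Enterprise audit logs for a cluster (clusterId is a placeholder).
const logsRequest = ListClusterLogsRequest.fromPartial({
  clusterId: "<cluster-id>",
  serviceType: ListClusterLogsRequest_ServiceType.AUDIT,
});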
*/ + config?: Mongodconfig44Enterprise; + /** Resources allocated to each mongod host. */ + resources?: Resources; +} + +export interface Mongodbspec44Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoCfg"; + /** Configuration for mongocfg 4.4 hosts. */ + config?: Mongocfgconfig44Enterprise; + /** Resources allocated to each mongocfg host. */ + resources?: Resources; +} + +export interface Mongodbspec44Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongos"; + /** Configuration for mongos 4.4 hosts. */ + config?: Mongosconfig44Enterprise; + /** Resources allocated to each mongos host. */ + resources?: Resources; +} + +export interface Mongodbspec44Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoInfra"; + /** Configuration for mongoinfra 4.4 hosts. */ + configMongos?: Mongosconfig44Enterprise; + configMongocfg?: Mongocfgconfig44Enterprise; + /** Resources allocated to each mongoinfra (mongos+mongocfg) host. */ + resources?: Resources; +} + export interface Mongodbspec50 { $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0"; /** Configuration and resource allocation for mongod 5.0 hosts. */ @@ -1140,9 +1209,54 @@ export interface Mongodbspec50_MongoInfra { resources?: Resources; } +export interface Mongodbspec50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise"; + /** Configuration and resource allocation for mongod 5.0 hosts. */ + mongod?: Mongodbspec50Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg 5.0 hosts. */ + mongocfg?: Mongodbspec50Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos 5.0 hosts. */ + mongos?: Mongodbspec50Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) 5.0 hosts. */ + mongoinfra?: Mongodbspec50Enterprise_MongoInfra; +} + +export interface Mongodbspec50Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongod"; + /** Configuration for mongod 5.0 hosts. */ + config?: Mongodconfig50Enterprise; + /** Resources allocated to each mongod host. */ + resources?: Resources; +} + +export interface Mongodbspec50Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoCfg"; + /** Configuration for mongocfg 5.0 hosts. */ + config?: Mongocfgconfig50Enterprise; + /** Resources allocated to each mongocfg host. */ + resources?: Resources; +} + +export interface Mongodbspec50Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongos"; + /** Configuration for mongos 5.0 hosts. */ + config?: Mongosconfig50Enterprise; + /** Resources allocated to each mongos host. */ + resources?: Resources; +} + +export interface Mongodbspec50Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoInfra"; + /** Configuration for mongoinfra 5.0 hosts. */ + configMongos?: Mongosconfig50Enterprise; + configMongocfg?: Mongocfgconfig50Enterprise; + /** Resources allocated to each mongoinfra (mongos+mongocfg) host. */ + resources?: Resources; +} + export interface ConfigSpec { $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec"; - /** Version of MongoDB used in the cluster. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `5.0`. */ + /** Version of MongoDB used in the cluster. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`. */ version: string; /** * MongoDB feature compatibility version. 
See usage details in [MongoDB documentation](https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/). @@ -1165,6 +1279,10 @@ export interface ConfigSpec { mongodbSpec44?: Mongodbspec44 | undefined; /** Configuration and resource allocation for a MongoDB 5.0 cluster. */ mongodbSpec50?: Mongodbspec50 | undefined; + /** Configuration and resource allocation for a MongoDB 4.4 Enterprise cluster. */ + mongodbSpec44Enterprise?: Mongodbspec44Enterprise | undefined; + /** Configuration and resource allocation for a MongoDB 5.0 Enterprise cluster. */ + mongodbSpec50Enterprise?: Mongodbspec50Enterprise | undefined; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Retain period of automatically created backup in days */ @@ -9008,37 +9126,37 @@ messageTypeRegistry.set( Mongodbspec44_MongoInfra ); -const baseMongodbspec50: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0", +const baseMongodbspec44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise", }; -export const Mongodbspec50 = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0" as const, +export const Mongodbspec44Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise" as const, encode( - message: Mongodbspec50, + message: Mongodbspec44Enterprise, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.mongod !== undefined) { - Mongodbspec50_Mongod.encode( + Mongodbspec44Enterprise_Mongod.encode( message.mongod, writer.uint32(10).fork() ).ldelim(); } if (message.mongocfg !== undefined) { - Mongodbspec50_MongoCfg.encode( + Mongodbspec44Enterprise_MongoCfg.encode( message.mongocfg, writer.uint32(18).fork() ).ldelim(); } if (message.mongos !== undefined) { - Mongodbspec50_Mongos.encode( + Mongodbspec44Enterprise_Mongos.encode( message.mongos, writer.uint32(26).fork() ).ldelim(); } if (message.mongoinfra !== undefined) { - Mongodbspec50_MongoInfra.encode( + Mongodbspec44Enterprise_MongoInfra.encode( message.mongoinfra, writer.uint32(34).fork() ).ldelim(); @@ -9046,27 +9164,38 @@ export const Mongodbspec50 = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodbspec50 { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec44Enterprise { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
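// Illustrative sketch, not part of the generated patch: ConfigSpec now
// accepts the Enterprise editions — version may be "4.4-enterprise" or
// "5.0-enterprise", paired with the new mongodbSpec44Enterprise /
// mongodbSpec50Enterprise oneof fields. A minimal 5.0 Enterprise spec built
// with fromPartial; the import path and resource values are assumptions, and
// diskSize is omitted for the same int64-representation reason as above.
import { ConfigSpec } from "./cluster_service";

const configSpec = ConfigSpec.fromPartial({
  version: "5.0-enterprise",
  mongodbSpec50Enterprise: {
    mongod: {
      resources: {
        resourcePresetId: "s2.small",
        diskTypeId: "network-ssd",
      },
    },
  },
});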
reader.len : reader.pos + length; - const message = { ...baseMongodbspec50 } as Mongodbspec50; + const message = { + ...baseMongodbspec44Enterprise, + } as Mongodbspec44Enterprise; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.mongod = Mongodbspec50_Mongod.decode(reader, reader.uint32()); + message.mongod = Mongodbspec44Enterprise_Mongod.decode( + reader, + reader.uint32() + ); break; case 2: - message.mongocfg = Mongodbspec50_MongoCfg.decode( + message.mongocfg = Mongodbspec44Enterprise_MongoCfg.decode( reader, reader.uint32() ); break; case 3: - message.mongos = Mongodbspec50_Mongos.decode(reader, reader.uint32()); + message.mongos = Mongodbspec44Enterprise_Mongos.decode( + reader, + reader.uint32() + ); break; case 4: - message.mongoinfra = Mongodbspec50_MongoInfra.decode( + message.mongoinfra = Mongodbspec44Enterprise_MongoInfra.decode( reader, reader.uint32() ); @@ -9079,87 +9208,95 @@ export const Mongodbspec50 = { return message; }, - fromJSON(object: any): Mongodbspec50 { - const message = { ...baseMongodbspec50 } as Mongodbspec50; + fromJSON(object: any): Mongodbspec44Enterprise { + const message = { + ...baseMongodbspec44Enterprise, + } as Mongodbspec44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodbspec50_Mongod.fromJSON(object.mongod) + ? Mongodbspec44Enterprise_Mongod.fromJSON(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodbspec50_MongoCfg.fromJSON(object.mongocfg) + ? Mongodbspec44Enterprise_MongoCfg.fromJSON(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodbspec50_Mongos.fromJSON(object.mongos) + ? Mongodbspec44Enterprise_Mongos.fromJSON(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodbspec50_MongoInfra.fromJSON(object.mongoinfra) + ? Mongodbspec44Enterprise_MongoInfra.fromJSON(object.mongoinfra) : undefined; return message; }, - toJSON(message: Mongodbspec50): unknown { + toJSON(message: Mongodbspec44Enterprise): unknown { const obj: any = {}; message.mongod !== undefined && (obj.mongod = message.mongod - ? Mongodbspec50_Mongod.toJSON(message.mongod) + ? Mongodbspec44Enterprise_Mongod.toJSON(message.mongod) : undefined); message.mongocfg !== undefined && (obj.mongocfg = message.mongocfg - ? Mongodbspec50_MongoCfg.toJSON(message.mongocfg) + ? Mongodbspec44Enterprise_MongoCfg.toJSON(message.mongocfg) : undefined); message.mongos !== undefined && (obj.mongos = message.mongos - ? Mongodbspec50_Mongos.toJSON(message.mongos) + ? Mongodbspec44Enterprise_Mongos.toJSON(message.mongos) : undefined); message.mongoinfra !== undefined && (obj.mongoinfra = message.mongoinfra - ? Mongodbspec50_MongoInfra.toJSON(message.mongoinfra) + ? Mongodbspec44Enterprise_MongoInfra.toJSON(message.mongoinfra) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodbspec50 { - const message = { ...baseMongodbspec50 } as Mongodbspec50; + ): Mongodbspec44Enterprise { + const message = { + ...baseMongodbspec44Enterprise, + } as Mongodbspec44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodbspec50_Mongod.fromPartial(object.mongod) + ? Mongodbspec44Enterprise_Mongod.fromPartial(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodbspec50_MongoCfg.fromPartial(object.mongocfg) + ? 
Mongodbspec44Enterprise_MongoCfg.fromPartial(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodbspec50_Mongos.fromPartial(object.mongos) + ? Mongodbspec44Enterprise_Mongos.fromPartial(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodbspec50_MongoInfra.fromPartial(object.mongoinfra) + ? Mongodbspec44Enterprise_MongoInfra.fromPartial(object.mongoinfra) : undefined; return message; }, }; -messageTypeRegistry.set(Mongodbspec50.$type, Mongodbspec50); +messageTypeRegistry.set(Mongodbspec44Enterprise.$type, Mongodbspec44Enterprise); -const baseMongodbspec50_Mongod: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod", +const baseMongodbspec44Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongod", }; -export const Mongodbspec50_Mongod = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod" as const, +export const Mongodbspec44Enterprise_Mongod = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongod" as const, encode( - message: Mongodbspec50_Mongod, + message: Mongodbspec44Enterprise_Mongod, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongodconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + Mongodconfig44Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); @@ -9170,15 +9307,20 @@ export const Mongodbspec50_Mongod = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_Mongod { + ): Mongodbspec44Enterprise_Mongod { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + const message = { + ...baseMongodbspec44Enterprise_Mongod, + } as Mongodbspec44Enterprise_Mongod; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongodconfig50.decode(reader, reader.uint32()); + message.config = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -9191,11 +9333,13 @@ export const Mongodbspec50_Mongod = { return message; }, - fromJSON(object: any): Mongodbspec50_Mongod { - const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + fromJSON(object: any): Mongodbspec44Enterprise_Mongod { + const message = { + ...baseMongodbspec44Enterprise_Mongod, + } as Mongodbspec44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? Mongodconfig50.fromJSON(object.config) + ? Mongodconfig44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9204,11 +9348,11 @@ export const Mongodbspec50_Mongod = { return message; }, - toJSON(message: Mongodbspec50_Mongod): unknown { + toJSON(message: Mongodbspec44Enterprise_Mongod): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongodconfig50.toJSON(message.config) + ? 
Mongodconfig44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9217,13 +9361,15 @@ export const Mongodbspec50_Mongod = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodbspec50_Mongod { - const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + ): Mongodbspec44Enterprise_Mongod { + const message = { + ...baseMongodbspec44Enterprise_Mongod, + } as Mongodbspec44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? Mongodconfig50.fromPartial(object.config) + ? Mongodconfig44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9233,21 +9379,25 @@ export const Mongodbspec50_Mongod = { }, }; -messageTypeRegistry.set(Mongodbspec50_Mongod.$type, Mongodbspec50_Mongod); +messageTypeRegistry.set( + Mongodbspec44Enterprise_Mongod.$type, + Mongodbspec44Enterprise_Mongod +); -const baseMongodbspec50_MongoCfg: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg", +const baseMongodbspec44Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoCfg", }; -export const Mongodbspec50_MongoCfg = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg" as const, +export const Mongodbspec44Enterprise_MongoCfg = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoCfg" as const, encode( - message: Mongodbspec50_MongoCfg, + message: Mongodbspec44Enterprise_MongoCfg, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongocfgconfig50.encode( + Mongocfgconfig44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -9261,15 +9411,20 @@ export const Mongodbspec50_MongoCfg = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_MongoCfg { + ): Mongodbspec44Enterprise_MongoCfg { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + const message = { + ...baseMongodbspec44Enterprise_MongoCfg, + } as Mongodbspec44Enterprise_MongoCfg; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongocfgconfig50.decode(reader, reader.uint32()); + message.config = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -9282,11 +9437,13 @@ export const Mongodbspec50_MongoCfg = { return message; }, - fromJSON(object: any): Mongodbspec50_MongoCfg { - const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + fromJSON(object: any): Mongodbspec44Enterprise_MongoCfg { + const message = { + ...baseMongodbspec44Enterprise_MongoCfg, + } as Mongodbspec44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfig50.fromJSON(object.config) + ? 
Mongocfgconfig44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9295,11 +9452,11 @@ export const Mongodbspec50_MongoCfg = { return message; }, - toJSON(message: Mongodbspec50_MongoCfg): unknown { + toJSON(message: Mongodbspec44Enterprise_MongoCfg): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongocfgconfig50.toJSON(message.config) + ? Mongocfgconfig44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9308,13 +9465,15 @@ export const Mongodbspec50_MongoCfg = { return obj; }, - fromPartial, I>>( - object: I - ): Mongodbspec50_MongoCfg { - const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec44Enterprise_MongoCfg { + const message = { + ...baseMongodbspec44Enterprise_MongoCfg, + } as Mongodbspec44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfig50.fromPartial(object.config) + ? Mongocfgconfig44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9324,21 +9483,28 @@ export const Mongodbspec50_MongoCfg = { }, }; -messageTypeRegistry.set(Mongodbspec50_MongoCfg.$type, Mongodbspec50_MongoCfg); +messageTypeRegistry.set( + Mongodbspec44Enterprise_MongoCfg.$type, + Mongodbspec44Enterprise_MongoCfg +); -const baseMongodbspec50_Mongos: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos", +const baseMongodbspec44Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongos", }; -export const Mongodbspec50_Mongos = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos" as const, +export const Mongodbspec44Enterprise_Mongos = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongos" as const, encode( - message: Mongodbspec50_Mongos, + message: Mongodbspec44Enterprise_Mongos, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongosconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + Mongosconfig44Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); @@ -9349,15 +9515,20 @@ export const Mongodbspec50_Mongos = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_Mongos { + ): Mongodbspec44Enterprise_Mongos { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + const message = { + ...baseMongodbspec44Enterprise_Mongos, + } as Mongodbspec44Enterprise_Mongos; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongosconfig50.decode(reader, reader.uint32()); + message.config = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -9370,11 +9541,13 @@ export const Mongodbspec50_Mongos = { return message; }, - fromJSON(object: any): Mongodbspec50_Mongos { - const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + fromJSON(object: any): Mongodbspec44Enterprise_Mongos { + const message = { + ...baseMongodbspec44Enterprise_Mongos, + } as Mongodbspec44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfig50.fromJSON(object.config) + ? Mongosconfig44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9383,11 +9556,11 @@ export const Mongodbspec50_Mongos = { return message; }, - toJSON(message: Mongodbspec50_Mongos): unknown { + toJSON(message: Mongodbspec44Enterprise_Mongos): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongosconfig50.toJSON(message.config) + ? Mongosconfig44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9396,13 +9569,15 @@ export const Mongodbspec50_Mongos = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodbspec50_Mongos { - const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + ): Mongodbspec44Enterprise_Mongos { + const message = { + ...baseMongodbspec44Enterprise_Mongos, + } as Mongodbspec44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfig50.fromPartial(object.config) + ? 
Mongosconfig44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9412,27 +9587,31 @@ export const Mongodbspec50_Mongos = { }, }; -messageTypeRegistry.set(Mongodbspec50_Mongos.$type, Mongodbspec50_Mongos); +messageTypeRegistry.set( + Mongodbspec44Enterprise_Mongos.$type, + Mongodbspec44Enterprise_Mongos +); -const baseMongodbspec50_MongoInfra: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra", +const baseMongodbspec44Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoInfra", }; -export const Mongodbspec50_MongoInfra = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra" as const, +export const Mongodbspec44Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoInfra" as const, encode( - message: Mongodbspec50_MongoInfra, + message: Mongodbspec44Enterprise_MongoInfra, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.configMongos !== undefined) { - Mongosconfig50.encode( + Mongosconfig44Enterprise.encode( message.configMongos, writer.uint32(10).fork() ).ldelim(); } if (message.configMongocfg !== undefined) { - Mongocfgconfig50.encode( + Mongocfgconfig44Enterprise.encode( message.configMongocfg, writer.uint32(18).fork() ).ldelim(); @@ -9446,20 +9625,23 @@ export const Mongodbspec50_MongoInfra = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_MongoInfra { + ): Mongodbspec44Enterprise_MongoInfra { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { - ...baseMongodbspec50_MongoInfra, - } as Mongodbspec50_MongoInfra; + ...baseMongodbspec44Enterprise_MongoInfra, + } as Mongodbspec44Enterprise_MongoInfra; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.configMongos = Mongosconfig50.decode(reader, reader.uint32()); + message.configMongos = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: - message.configMongocfg = Mongocfgconfig50.decode( + message.configMongocfg = Mongocfgconfig44Enterprise.decode( reader, reader.uint32() ); @@ -9475,17 +9657,17 @@ export const Mongodbspec50_MongoInfra = { return message; }, - fromJSON(object: any): Mongodbspec50_MongoInfra { + fromJSON(object: any): Mongodbspec44Enterprise_MongoInfra { const message = { - ...baseMongodbspec50_MongoInfra, - } as Mongodbspec50_MongoInfra; + ...baseMongodbspec44Enterprise_MongoInfra, + } as Mongodbspec44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfig50.fromJSON(object.configMongos) + ? Mongosconfig44Enterprise.fromJSON(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfig50.fromJSON(object.configMongocfg) + ? Mongocfgconfig44Enterprise.fromJSON(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9494,15 +9676,15 @@ export const Mongodbspec50_MongoInfra = { return message; }, - toJSON(message: Mongodbspec50_MongoInfra): unknown { + toJSON(message: Mongodbspec44Enterprise_MongoInfra): unknown { const obj: any = {}; message.configMongos !== undefined && (obj.configMongos = message.configMongos - ? Mongosconfig50.toJSON(message.configMongos) + ? 
Mongosconfig44Enterprise.toJSON(message.configMongos) : undefined); message.configMongocfg !== undefined && (obj.configMongocfg = message.configMongocfg - ? Mongocfgconfig50.toJSON(message.configMongocfg) + ? Mongocfgconfig44Enterprise.toJSON(message.configMongocfg) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9511,19 +9693,19 @@ export const Mongodbspec50_MongoInfra = { return obj; }, - fromPartial, I>>( - object: I - ): Mongodbspec50_MongoInfra { + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec44Enterprise_MongoInfra { const message = { - ...baseMongodbspec50_MongoInfra, - } as Mongodbspec50_MongoInfra; + ...baseMongodbspec44Enterprise_MongoInfra, + } as Mongodbspec44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfig50.fromPartial(object.configMongos) + ? Mongosconfig44Enterprise.fromPartial(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfig50.fromPartial(object.configMongocfg) + ? Mongocfgconfig44Enterprise.fromPartial(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9534,37 +9716,1161 @@ export const Mongodbspec50_MongoInfra = { }; messageTypeRegistry.set( - Mongodbspec50_MongoInfra.$type, - Mongodbspec50_MongoInfra + Mongodbspec44Enterprise_MongoInfra.$type, + Mongodbspec44Enterprise_MongoInfra ); -const baseConfigSpec: object = { - $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec", - version: "", - featureCompatibilityVersion: "", +const baseMongodbspec50: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0", }; -export const ConfigSpec = { - $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec" as const, +export const Mongodbspec50 = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0" as const, encode( - message: ConfigSpec, + message: Mongodbspec50, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.version !== "") { - writer.uint32(10).string(message.version); - } - if (message.featureCompatibilityVersion !== "") { - writer.uint32(42).string(message.featureCompatibilityVersion); + if (message.mongod !== undefined) { + Mongodbspec50_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); } - if (message.mongodbSpec36 !== undefined) { - Mongodbspec36.encode( - message.mongodbSpec36, + if (message.mongocfg !== undefined) { + Mongodbspec50_MongoCfg.encode( + message.mongocfg, writer.uint32(18).fork() ).ldelim(); } - if (message.mongodbSpec40 !== undefined) { - Mongodbspec40.encode( + if (message.mongos !== undefined) { + Mongodbspec50_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodbspec50_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodbspec50 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodbspec50 } as Mongodbspec50; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodbspec50_Mongod.decode(reader, reader.uint32()); + break; + case 2: + message.mongocfg = Mongodbspec50_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodbspec50_Mongos.decode(reader, reader.uint32()); + break; + case 4: + message.mongoinfra = Mongodbspec50_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50 { + const message = { ...baseMongodbspec50 } as Mongodbspec50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodbspec50_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodbspec50_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodbspec50_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodbspec50_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50 { + const message = { ...baseMongodbspec50 } as Mongodbspec50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50.$type, Mongodbspec50); + +const baseMongodbspec50_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod", +}; + +export const Mongodbspec50_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod" as const, + + encode( + message: Mongodbspec50_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_Mongod { + const reader = input instanceof _m0.Reader ? 
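// decode() wraps raw bytes in a protobufjs Reader when needed and then walks
// the buffer until `end`: for every varint tag, `tag >>> 3` is the field
// number and `tag & 7` the wire type; known fields are delegated to the
// nested codecs and unknown fields are skipped via reader.skipType().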
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_Mongod { + const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfig50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_Mongod { + const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50_Mongod.$type, Mongodbspec50_Mongod); + +const baseMongodbspec50_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg", +}; + +export const Mongodbspec50_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg" as const, + + encode( + message: Mongodbspec50_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfig50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_MongoCfg { + const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfig50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_MongoCfg { + const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50_MongoCfg.$type, Mongodbspec50_MongoCfg); + +const baseMongodbspec50_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos", +}; + +export const Mongodbspec50_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos" as const, + + encode( + message: Mongodbspec50_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_Mongos { + const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfig50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_Mongos { + const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50_Mongos.$type, Mongodbspec50_Mongos); + +const baseMongodbspec50_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra", +}; + +export const Mongodbspec50_MongoInfra = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra" as const, + + encode( + message: Mongodbspec50_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfig50.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfig50.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50_MongoInfra, + } as Mongodbspec50_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.configMongocfg = Mongocfgconfig50.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_MongoInfra { + const message = { + ...baseMongodbspec50_MongoInfra, + } as Mongodbspec50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig50.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfig50.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfig50.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_MongoInfra { + const message = { + ...baseMongodbspec50_MongoInfra, + } as Mongodbspec50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig50.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50_MongoInfra.$type, + Mongodbspec50_MongoInfra +); + +const baseMongodbspec50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise", +}; + +export const Mongodbspec50Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise" as const, + + encode( + message: Mongodbspec50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodbspec50Enterprise_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.mongocfg !== undefined) { + Mongodbspec50Enterprise_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodbspec50Enterprise_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodbspec50Enterprise_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise, + } as Mongodbspec50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodbspec50Enterprise_Mongod.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.mongocfg = Mongodbspec50Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodbspec50Enterprise_Mongos.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.mongoinfra = Mongodbspec50Enterprise_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise { + const message = { + ...baseMongodbspec50Enterprise, + } as Mongodbspec50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50Enterprise_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50Enterprise_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50Enterprise_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50Enterprise_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodbspec50Enterprise_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodbspec50Enterprise_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodbspec50Enterprise_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? 
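// Mongodbspec50Enterprise mirrors Mongodbspec50 but points at the
// 5.0-enterprise config codecs; toJSON() here emits only the components
// (mongod / mongocfg / mongos / mongoinfra) that are actually set.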
Mongodbspec50Enterprise_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50Enterprise { + const message = { + ...baseMongodbspec50Enterprise, + } as Mongodbspec50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50Enterprise_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50Enterprise_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50Enterprise_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50Enterprise_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50Enterprise.$type, Mongodbspec50Enterprise); + +const baseMongodbspec50Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongod", +}; + +export const Mongodbspec50Enterprise_Mongod = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongod" as const, + + encode( + message: Mongodbspec50Enterprise_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfig50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_Mongod, + } as Mongodbspec50Enterprise_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_Mongod { + const message = { + ...baseMongodbspec50Enterprise_Mongod, + } as Mongodbspec50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfig50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50Enterprise_Mongod { + const message = { + ...baseMongodbspec50Enterprise_Mongod, + } as Mongodbspec50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? 
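// After each codec object is defined it is registered in messageTypeRegistry
// under its fully qualified `$type`, which lets generic code look a codec up
// by message name at runtime.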
Mongodconfig50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_Mongod.$type, + Mongodbspec50Enterprise_Mongod +); + +const baseMongodbspec50Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoCfg", +}; + +export const Mongodbspec50Enterprise_MongoCfg = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoCfg" as const, + + encode( + message: Mongodbspec50Enterprise_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_MongoCfg, + } as Mongodbspec50Enterprise_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_MongoCfg { + const message = { + ...baseMongodbspec50Enterprise_MongoCfg, + } as Mongodbspec50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfig50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec50Enterprise_MongoCfg { + const message = { + ...baseMongodbspec50Enterprise_MongoCfg, + } as Mongodbspec50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_MongoCfg.$type, + Mongodbspec50Enterprise_MongoCfg +); + +const baseMongodbspec50Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongos", +}; + +export const Mongodbspec50Enterprise_Mongos = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongos" as const, + + encode( + message: Mongodbspec50Enterprise_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfig50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_Mongos, + } as Mongodbspec50Enterprise_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_Mongos { + const message = { + ...baseMongodbspec50Enterprise_Mongos, + } as Mongodbspec50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfig50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50Enterprise_Mongos { + const message = { + ...baseMongodbspec50Enterprise_Mongos, + } as Mongodbspec50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_Mongos.$type, + Mongodbspec50Enterprise_Mongos +); + +const baseMongodbspec50Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoInfra", +}; + +export const Mongodbspec50Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoInfra" as const, + + encode( + message: Mongodbspec50Enterprise_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfig50Enterprise.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_MongoInfra, + } as Mongodbspec50Enterprise_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_MongoInfra { + const message = { + ...baseMongodbspec50Enterprise_MongoInfra, + } as Mongodbspec50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig50Enterprise.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfig50Enterprise.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfig50Enterprise.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec50Enterprise_MongoInfra { + const message = { + ...baseMongodbspec50Enterprise_MongoInfra, + } as Mongodbspec50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? 
Mongosconfig50Enterprise.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_MongoInfra.$type, + Mongodbspec50Enterprise_MongoInfra +); + +const baseConfigSpec: object = { + $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec", + version: "", + featureCompatibilityVersion: "", +}; + +export const ConfigSpec = { + $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec" as const, + + encode( + message: ConfigSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.version !== "") { + writer.uint32(10).string(message.version); + } + if (message.featureCompatibilityVersion !== "") { + writer.uint32(42).string(message.featureCompatibilityVersion); + } + if (message.mongodbSpec36 !== undefined) { + Mongodbspec36.encode( + message.mongodbSpec36, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongodbSpec40 !== undefined) { + Mongodbspec40.encode( message.mongodbSpec40, writer.uint32(34).fork() ).ldelim(); @@ -9587,6 +10893,18 @@ export const ConfigSpec = { writer.uint32(82).fork() ).ldelim(); } + if (message.mongodbSpec44Enterprise !== undefined) { + Mongodbspec44Enterprise.encode( + message.mongodbSpec44Enterprise, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.mongodbSpec50Enterprise !== undefined) { + Mongodbspec50Enterprise.encode( + message.mongodbSpec50Enterprise, + writer.uint32(98).fork() + ).ldelim(); + } if (message.backupWindowStart !== undefined) { TimeOfDay.encode( message.backupWindowStart, @@ -9636,6 +10954,18 @@ export const ConfigSpec = { case 10: message.mongodbSpec50 = Mongodbspec50.decode(reader, reader.uint32()); break; + case 11: + message.mongodbSpec44Enterprise = Mongodbspec44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.mongodbSpec50Enterprise = Mongodbspec50Enterprise.decode( + reader, + reader.uint32() + ); + break; case 3: message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); break; @@ -9687,6 +11017,16 @@ export const ConfigSpec = { object.mongodbSpec_5_0 !== undefined && object.mongodbSpec_5_0 !== null ? Mongodbspec50.fromJSON(object.mongodbSpec_5_0) : undefined; + message.mongodbSpec44Enterprise = + object.mongodbSpec_4_4_enterprise !== undefined && + object.mongodbSpec_4_4_enterprise !== null + ? Mongodbspec44Enterprise.fromJSON(object.mongodbSpec_4_4_enterprise) + : undefined; + message.mongodbSpec50Enterprise = + object.mongodbSpec_5_0_enterprise !== undefined && + object.mongodbSpec_5_0_enterprise !== null + ? Mongodbspec50Enterprise.fromJSON(object.mongodbSpec_5_0_enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -9729,6 +11069,14 @@ export const ConfigSpec = { (obj.mongodbSpec_5_0 = message.mongodbSpec50 ? Mongodbspec50.toJSON(message.mongodbSpec50) : undefined); + message.mongodbSpec44Enterprise !== undefined && + (obj.mongodbSpec_4_4_enterprise = message.mongodbSpec44Enterprise + ? Mongodbspec44Enterprise.toJSON(message.mongodbSpec44Enterprise) + : undefined); + message.mongodbSpec50Enterprise !== undefined && + (obj.mongodbSpec_5_0_enterprise = message.mongodbSpec50Enterprise + ? 
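// ConfigSpec gains two new spec variants: mongodbSpec44Enterprise (proto
// field 11, wire tag 90) and mongodbSpec50Enterprise (proto field 12, wire
// tag 98). Note the JSON mapping: fromJSON/toJSON use the proto-style keys
// mongodbSpec_4_4_enterprise / mongodbSpec_5_0_enterprise, while fromPartial
// and the TS interface use the camelCase property names.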
Mongodbspec50Enterprise.toJSON(message.mongodbSpec50Enterprise) + : undefined); message.backupWindowStart !== undefined && (obj.backupWindowStart = message.backupWindowStart ? TimeOfDay.toJSON(message.backupWindowStart) @@ -9767,6 +11115,16 @@ export const ConfigSpec = { object.mongodbSpec50 !== undefined && object.mongodbSpec50 !== null ? Mongodbspec50.fromPartial(object.mongodbSpec50) : undefined; + message.mongodbSpec44Enterprise = + object.mongodbSpec44Enterprise !== undefined && + object.mongodbSpec44Enterprise !== null + ? Mongodbspec44Enterprise.fromPartial(object.mongodbSpec44Enterprise) + : undefined; + message.mongodbSpec50Enterprise = + object.mongodbSpec50Enterprise !== undefined && + object.mongodbSpec50Enterprise !== null + ? Mongodbspec50Enterprise.fromPartial(object.mongodbSpec50Enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts new file mode 100644 index 00000000..7ccec218 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts @@ -0,0 +1,2902 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.mongodb.v1.config"; + +/** + * Configuration of a mongod daemon. Supported options are a limited subset of all + * options described in [MongoDB documentation](https://docs.mongodb.com/v4.4/reference/configuration-options/). + */ +export interface Mongodconfig44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise"; + /** `storage` section of mongod configuration. */ + storage?: Mongodconfig44Enterprise_Storage; + /** `operationProfiling` section of mongod configuration. */ + operationProfiling?: Mongodconfig44Enterprise_OperationProfiling; + /** `net` section of mongod configuration. */ + net?: Mongodconfig44Enterprise_Network; + /** `security` section of mongod configuration. */ + security?: Mongodconfig44Enterprise_Security; + /** `AuditLog` section of mongod configuration. */ + auditLog?: Mongodconfig44Enterprise_AuditLog; + /** `SetParameter` section of mongod configuration. */ + setParameter?: Mongodconfig44Enterprise_SetParameter; +} + +export interface Mongodconfig44Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongodconfig44Enterprise_Storage_WiredTiger; + /** Configuration of the MongoDB [journal](https://docs.mongodb.com/v4.4/reference/glossary/#term-journal). */ + journal?: Mongodconfig44Enterprise_Storage_Journal; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongodconfig44Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + /** Collection configuration for WiredTiger. 
*/ + collectionConfig?: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; +} + +export interface Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.CollectionConfig"; + /** Default type of compression to use for collection data. */ + blockCompressor: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor; +} + +export enum Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + COMPRESSOR_UNSPECIFIED = 0, + /** NONE - No compression. */ + NONE = 1, + /** SNAPPY - The [Snappy](https://docs.mongodb.com/v4.4/reference/glossary/#term-snappy) compression. */ + SNAPPY = 2, + /** ZLIB - The [zlib](https://docs.mongodb.com/v4.4/reference/glossary/#term-zlib) compression. */ + ZLIB = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object: any +): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + switch (object) { + case 0: + case "COMPRESSOR_UNSPECIFIED": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED; + case 1: + case "NONE": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE; + case 2: + case "SNAPPY": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY; + case 3: + case "ZLIB": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.UNRECOGNIZED; + } +} + +export function mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + object: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor +): string { + switch (object) { + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED: + return "COMPRESSOR_UNSPECIFIED"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE: + return "NONE"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY: + return "SNAPPY"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: + return "ZLIB"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig44Enterprise_Storage_Journal { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.Journal"; + /** + * Commit interval between journal operations, in milliseconds. + * Default: 100. + */ + commitInterval?: number; +} + +export interface Mongodconfig44Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongodconfig44Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. 
+ */ + slowOpThreshold?: number; +} + +export enum Mongodconfig44Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig44Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongodconfig44Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongodconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongodconfig44Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongodconfig44Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongodconfig44Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig44Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongodconfig44Enterprise_OperationProfiling_ModeToJSON( + object: Mongodconfig44Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongodconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongodconfig44Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongodconfig44Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongodconfig44Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig44Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Network"; + /** The maximum number of simultaneous connections that mongod will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfig44Enterprise_Security { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security"; + /** If encryption at rest should be enabled or not */ + enableEncryption?: boolean; + /** `kmip` section of mongod security config */ + kmip?: Mongodconfig44Enterprise_Security_KMIP; +} + +export interface Mongodconfig44Enterprise_Security_KMIP { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security.KMIP"; + /** KMIP server name */ + serverName: string; + /** KMIP server port */ + port?: number; + /** KMIP Server CA */ + serverCa: string; + /** KMIP client certificate + private key (unencrypted) */ + clientCertificate: string; + /** KMIP Key identifier (if any) */ + keyIdentifier: string; +} + +export interface Mongodconfig44Enterprise_AuditLog { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.AuditLog"; + /** Audit filter */ + filter: string; +} + +export interface Mongodconfig44Enterprise_SetParameter { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.SetParameter"; + /** Enables the auditing of authorization successes */ + auditAuthorizationSuccess?: boolean; +} + +export interface Mongocfgconfig44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise"; + /** `storage` section of mongocfg configuration. */ + storage?: Mongocfgconfig44Enterprise_Storage; + /** `operationProfiling` section of mongocfg configuration. */ + operationProfiling?: Mongocfgconfig44Enterprise_OperationProfiling; + /** `net` section of mongocfg configuration. 
*/ + net?: Mongocfgconfig44Enterprise_Network; +} + +export interface Mongocfgconfig44Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongocfgconfig44Enterprise_Storage_WiredTiger; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongocfgconfig44Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; +} + +export interface Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongocfgconfig44Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongocfgconfig44Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. For details see [MongoDB documentation](https://docs.mongodb.com/v4.4/reference/configuration-options/#operationProfiling.slowOpThresholdMs). + */ + slowOpThreshold?: number; +} + +export enum Mongocfgconfig44Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongocfgconfig44Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongocfgconfig44Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongocfgconfig44Enterprise_OperationProfiling_ModeToJSON( + object: Mongocfgconfig44Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongocfgconfig44Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Network"; + /** The maximum number of simultaneous connections that mongocfg will accept. 
*/ + maxIncomingConnections?: number; +} + +export interface Mongosconfig44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise"; + /** Network settings for mongos. */ + net?: Mongosconfig44Enterprise_Network; +} + +export interface Mongosconfig44Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise.Network"; + /** The maximum number of simultaneous connections that mongos will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfigset44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet4_4_enterprise"; + /** + * Effective mongod settings for a MongoDB 4.4 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongodconfig44Enterprise; + /** User-defined mongod settings for a MongoDB 4.4 cluster. */ + userConfig?: Mongodconfig44Enterprise; + /** Default mongod configuration for a MongoDB 4.4 cluster. */ + defaultConfig?: Mongodconfig44Enterprise; +} + +export interface Mongocfgconfigset44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet4_4_enterprise"; + /** + * Effective mongocfg settings for a MongoDB 4.4 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongocfgconfig44Enterprise; + /** User-defined mongocfg settings for a MongoDB 4.4 cluster. */ + userConfig?: Mongocfgconfig44Enterprise; + /** Default mongocfg configuration for a MongoDB 4.4 cluster. */ + defaultConfig?: Mongocfgconfig44Enterprise; +} + +export interface Mongosconfigset44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet4_4_enterprise"; + /** + * Effective mongos settings for a MongoDB 4.4 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongosconfig44Enterprise; + /** User-defined mongos settings for a MongoDB 4.4 cluster. */ + userConfig?: Mongosconfig44Enterprise; + /** Default mongos configuration for a MongoDB 4.4 cluster. 
*/ + defaultConfig?: Mongosconfig44Enterprise; +} + +const baseMongodconfig44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise", +}; + +export const Mongodconfig44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise" as const, + + encode( + message: Mongodconfig44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongodconfig44Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongodconfig44Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongodconfig44Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.security !== undefined) { + Mongodconfig44Enterprise_Security.encode( + message.security, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.auditLog !== undefined) { + Mongodconfig44Enterprise_AuditLog.encode( + message.auditLog, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.setParameter !== undefined) { + Mongodconfig44Enterprise_SetParameter.encode( + message.setParameter, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise, + } as Mongodconfig44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongodconfig44Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongodconfig44Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongodconfig44Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.security = Mongodconfig44Enterprise_Security.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.auditLog = Mongodconfig44Enterprise_AuditLog.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.setParameter = Mongodconfig44Enterprise_SetParameter.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise { + const message = { + ...baseMongodconfig44Enterprise, + } as Mongodconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig44Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig44Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig44Enterprise_Network.fromJSON(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig44Enterprise_Security.fromJSON(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? 
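// The 4.4-enterprise mongod config extends the usual storage /
// operationProfiling / net sections with enterprise-only `security`
// (encryption at rest + KMIP), `auditLog` and `setParameter` sections.
// Illustrative sketch only (hand-written, not generated; the server name,
// port and audit filter below are made-up values):
//
//   const cfg = Mongodconfig44Enterprise.fromPartial({
//     security: {
//       enableEncryption: true,
//       kmip: { serverName: "kmip.example.net", port: 5696 },
//     },
//     auditLog: { filter: '{ "atype": "authenticate" }' },
//     setParameter: { auditAuthorizationSuccess: true },
//   });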
Mongodconfig44Enterprise_AuditLog.fromJSON(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig44Enterprise_SetParameter.fromJSON(object.setParameter) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongodconfig44Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongodconfig44Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongodconfig44Enterprise_Network.toJSON(message.net) + : undefined); + message.security !== undefined && + (obj.security = message.security + ? Mongodconfig44Enterprise_Security.toJSON(message.security) + : undefined); + message.auditLog !== undefined && + (obj.auditLog = message.auditLog + ? Mongodconfig44Enterprise_AuditLog.toJSON(message.auditLog) + : undefined); + message.setParameter !== undefined && + (obj.setParameter = message.setParameter + ? Mongodconfig44Enterprise_SetParameter.toJSON(message.setParameter) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig44Enterprise { + const message = { + ...baseMongodconfig44Enterprise, + } as Mongodconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig44Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig44Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig44Enterprise_Network.fromPartial(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig44Enterprise_Security.fromPartial(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig44Enterprise_AuditLog.fromPartial(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig44Enterprise_SetParameter.fromPartial(object.setParameter) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise.$type, + Mongodconfig44Enterprise +); + +const baseMongodconfig44Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage", +}; + +export const Mongodconfig44Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage" as const, + + encode( + message: Mongodconfig44Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongodconfig44Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.journal !== undefined) { + Mongodconfig44Enterprise_Storage_Journal.encode( + message.journal, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage, + } as Mongodconfig44Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongodconfig44Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.journal = Mongodconfig44Enterprise_Storage_Journal.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Storage { + const message = { + ...baseMongodconfig44Enterprise_Storage, + } as Mongodconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig44Enterprise_Storage_Journal.fromJSON(object.journal) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongodconfig44Enterprise_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + message.journal !== undefined && + (obj.journal = message.journal + ? Mongodconfig44Enterprise_Storage_Journal.toJSON(message.journal) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Storage { + const message = { + ...baseMongodconfig44Enterprise_Storage, + } as Mongodconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig44Enterprise_Storage_Journal.fromPartial(object.journal) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage.$type, + Mongodconfig44Enterprise_Storage +); + +const baseMongodconfig44Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger", +}; + +export const Mongodconfig44Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.collectionConfig !== undefined) { + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.encode( + message.collectionConfig, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger, + } as Mongodconfig44Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.collectionConfig = + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger, + } as Mongodconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.fromJSON( + object.collectionConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + message.collectionConfig !== undefined && + (obj.collectionConfig = message.collectionConfig + ? Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.toJSON( + message.collectionConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger, + } as Mongodconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.fromPartial( + object.collectionConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_WiredTiger.$type, + Mongodconfig44Enterprise_Storage_WiredTiger +); + +const baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig: object = + { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.CollectionConfig", + blockCompressor: 0, + }; + +export const Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.CollectionConfig" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.blockCompressor !== 0) { + writer.uint32(8).int32(message.blockCompressor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blockCompressor = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = + object.blockCompressor !== undefined && object.blockCompressor !== null + ? 
mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object.blockCompressor + ) + : 0; + return message; + }, + + toJSON( + message: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig + ): unknown { + const obj: any = {}; + message.blockCompressor !== undefined && + (obj.blockCompressor = + mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + message.blockCompressor + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = object.blockCompressor ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.$type, + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig +); + +const baseMongodconfig44Enterprise_Storage_Journal: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.Journal", +}; + +export const Mongodconfig44Enterprise_Storage_Journal = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.Journal" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_Journal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.commitInterval !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.commitInterval! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_Journal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_Journal, + } as Mongodconfig44Enterprise_Storage_Journal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.commitInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig44Enterprise_Storage_Journal, + } as Mongodconfig44Enterprise_Storage_Journal; + message.commitInterval = + object.commitInterval !== undefined && object.commitInterval !== null + ? Number(object.commitInterval) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Storage_Journal): unknown { + const obj: any = {}; + message.commitInterval !== undefined && + (obj.commitInterval = message.commitInterval); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig44Enterprise_Storage_Journal, + } as Mongodconfig44Enterprise_Storage_Journal; + message.commitInterval = object.commitInterval ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_Journal.$type, + Mongodconfig44Enterprise_Storage_Journal +); + +const baseMongodconfig44Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongodconfig44Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.OperationProfiling" as const, + + encode( + message: Mongodconfig44Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_OperationProfiling, + } as Mongodconfig44Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig44Enterprise_OperationProfiling, + } as Mongodconfig44Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? mongodconfig44Enterprise_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongodconfig44Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig44Enterprise_OperationProfiling, + } as Mongodconfig44Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_OperationProfiling.$type, + Mongodconfig44Enterprise_OperationProfiling +); + +const baseMongodconfig44Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Network", +}; + +export const Mongodconfig44Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Network" as const, + + encode( + message: Mongodconfig44Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Network, + } as Mongodconfig44Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Network { + const message = { + ...baseMongodconfig44Enterprise_Network, + } as Mongodconfig44Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Network { + const message = { + ...baseMongodconfig44Enterprise_Network, + } as Mongodconfig44Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Network.$type, + Mongodconfig44Enterprise_Network +); + +const baseMongodconfig44Enterprise_Security: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security", +}; + +export const Mongodconfig44Enterprise_Security = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security" as const, + + encode( + message: Mongodconfig44Enterprise_Security, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enableEncryption !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableEncryption!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.kmip !== undefined) { + Mongodconfig44Enterprise_Security_KMIP.encode( + message.kmip, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Security { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Security, + } as Mongodconfig44Enterprise_Security; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enableEncryption = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.kmip = Mongodconfig44Enterprise_Security_KMIP.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Security { + const message = { + ...baseMongodconfig44Enterprise_Security, + } as Mongodconfig44Enterprise_Security; + message.enableEncryption = + object.enableEncryption !== undefined && object.enableEncryption !== null + ? Boolean(object.enableEncryption) + : undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig44Enterprise_Security_KMIP.fromJSON(object.kmip) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Security): unknown { + const obj: any = {}; + message.enableEncryption !== undefined && + (obj.enableEncryption = message.enableEncryption); + message.kmip !== undefined && + (obj.kmip = message.kmip + ? Mongodconfig44Enterprise_Security_KMIP.toJSON(message.kmip) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Security { + const message = { + ...baseMongodconfig44Enterprise_Security, + } as Mongodconfig44Enterprise_Security; + message.enableEncryption = object.enableEncryption ?? undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig44Enterprise_Security_KMIP.fromPartial(object.kmip) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Security.$type, + Mongodconfig44Enterprise_Security +); + +const baseMongodconfig44Enterprise_Security_KMIP: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security.KMIP", + serverName: "", + serverCa: "", + clientCertificate: "", + keyIdentifier: "", +}; + +export const Mongodconfig44Enterprise_Security_KMIP = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security.KMIP" as const, + + encode( + message: Mongodconfig44Enterprise_Security_KMIP, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serverName !== "") { + writer.uint32(10).string(message.serverName); + } + if (message.port !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.port! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.serverCa !== "") { + writer.uint32(26).string(message.serverCa); + } + if (message.clientCertificate !== "") { + writer.uint32(34).string(message.clientCertificate); + } + if (message.keyIdentifier !== "") { + writer.uint32(42).string(message.keyIdentifier); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Security_KMIP { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Security_KMIP, + } as Mongodconfig44Enterprise_Security_KMIP; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serverName = reader.string(); + break; + case 2: + message.port = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.serverCa = reader.string(); + break; + case 4: + message.clientCertificate = reader.string(); + break; + case 5: + message.keyIdentifier = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig44Enterprise_Security_KMIP, + } as Mongodconfig44Enterprise_Security_KMIP; + message.serverName = + object.serverName !== undefined && object.serverName !== null + ? String(object.serverName) + : ""; + message.port = + object.port !== undefined && object.port !== null + ? Number(object.port) + : undefined; + message.serverCa = + object.serverCa !== undefined && object.serverCa !== null + ? String(object.serverCa) + : ""; + message.clientCertificate = + object.clientCertificate !== undefined && + object.clientCertificate !== null + ? String(object.clientCertificate) + : ""; + message.keyIdentifier = + object.keyIdentifier !== undefined && object.keyIdentifier !== null + ? String(object.keyIdentifier) + : ""; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Security_KMIP): unknown { + const obj: any = {}; + message.serverName !== undefined && (obj.serverName = message.serverName); + message.port !== undefined && (obj.port = message.port); + message.serverCa !== undefined && (obj.serverCa = message.serverCa); + message.clientCertificate !== undefined && + (obj.clientCertificate = message.clientCertificate); + message.keyIdentifier !== undefined && + (obj.keyIdentifier = message.keyIdentifier); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig44Enterprise_Security_KMIP, + } as Mongodconfig44Enterprise_Security_KMIP; + message.serverName = object.serverName ?? ""; + message.port = object.port ?? undefined; + message.serverCa = object.serverCa ?? ""; + message.clientCertificate = object.clientCertificate ?? ""; + message.keyIdentifier = object.keyIdentifier ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Security_KMIP.$type, + Mongodconfig44Enterprise_Security_KMIP +); + +const baseMongodconfig44Enterprise_AuditLog: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.AuditLog", + filter: "", +}; + +export const Mongodconfig44Enterprise_AuditLog = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.AuditLog" as const, + + encode( + message: Mongodconfig44Enterprise_AuditLog, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.filter !== "") { + writer.uint32(10).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_AuditLog { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_AuditLog, + } as Mongodconfig44Enterprise_AuditLog; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_AuditLog { + const message = { + ...baseMongodconfig44Enterprise_AuditLog, + } as Mongodconfig44Enterprise_AuditLog; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_AuditLog): unknown { + const obj: any = {}; + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_AuditLog { + const message = { + ...baseMongodconfig44Enterprise_AuditLog, + } as Mongodconfig44Enterprise_AuditLog; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_AuditLog.$type, + Mongodconfig44Enterprise_AuditLog +); + +const baseMongodconfig44Enterprise_SetParameter: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.SetParameter", +}; + +export const Mongodconfig44Enterprise_SetParameter = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.SetParameter" as const, + + encode( + message: Mongodconfig44Enterprise_SetParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.auditAuthorizationSuccess !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.auditAuthorizationSuccess!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_SetParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_SetParameter, + } as Mongodconfig44Enterprise_SetParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.auditAuthorizationSuccess = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_SetParameter { + const message = { + ...baseMongodconfig44Enterprise_SetParameter, + } as Mongodconfig44Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess !== undefined && + object.auditAuthorizationSuccess !== null + ? Boolean(object.auditAuthorizationSuccess) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_SetParameter): unknown { + const obj: any = {}; + message.auditAuthorizationSuccess !== undefined && + (obj.auditAuthorizationSuccess = message.auditAuthorizationSuccess); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_SetParameter { + const message = { + ...baseMongodconfig44Enterprise_SetParameter, + } as Mongodconfig44Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_SetParameter.$type, + Mongodconfig44Enterprise_SetParameter +); + +const baseMongocfgconfig44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise", +}; + +export const Mongocfgconfig44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise" as const, + + encode( + message: Mongocfgconfig44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongocfgconfig44Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongocfgconfig44Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongocfgconfig44Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise, + } as Mongocfgconfig44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongocfgconfig44Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongocfgconfig44Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongocfgconfig44Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise { + const message = { + ...baseMongocfgconfig44Enterprise, + } as Mongocfgconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig44Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig44Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig44Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongocfgconfig44Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongocfgconfig44Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongocfgconfig44Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig44Enterprise { + const message = { + ...baseMongocfgconfig44Enterprise, + } as Mongocfgconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? 
Mongocfgconfig44Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig44Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig44Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise.$type, + Mongocfgconfig44Enterprise +); + +const baseMongocfgconfig44Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage", +}; + +export const Mongocfgconfig44Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage" as const, + + encode( + message: Mongocfgconfig44Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongocfgconfig44Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Storage, + } as Mongocfgconfig44Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongocfgconfig44Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_Storage { + const message = { + ...baseMongocfgconfig44Enterprise_Storage, + } as Mongocfgconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig44Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongocfgconfig44Enterprise_Storage_WiredTiger.toJSON( + message.wiredTiger + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig44Enterprise_Storage { + const message = { + ...baseMongocfgconfig44Enterprise_Storage, + } as Mongocfgconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? 
Mongocfgconfig44Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Storage.$type, + Mongocfgconfig44Enterprise_Storage +); + +const baseMongocfgconfig44Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger", +}; + +export const Mongocfgconfig44Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongocfgconfig44Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? 
Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Storage_WiredTiger.$type, + Mongocfgconfig44Enterprise_Storage_WiredTiger +); + +const baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongocfgconfig44Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongocfgconfig44Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.OperationProfiling" as const, + + encode( + message: Mongocfgconfig44Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_OperationProfiling, + } as Mongocfgconfig44Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig44Enterprise_OperationProfiling, + } as Mongocfgconfig44Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? mongocfgconfig44Enterprise_OperationProfiling_ModeFromJSON( + object.mode + ) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongocfgconfig44Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig44Enterprise_OperationProfiling, + } as Mongocfgconfig44Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_OperationProfiling.$type, + Mongocfgconfig44Enterprise_OperationProfiling +); + +const baseMongocfgconfig44Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Network", +}; + +export const Mongocfgconfig44Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Network" as const, + + encode( + message: Mongocfgconfig44Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Network, + } as Mongocfgconfig44Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_Network { + const message = { + ...baseMongocfgconfig44Enterprise_Network, + } as Mongocfgconfig44Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig44Enterprise_Network { + const message = { + ...baseMongocfgconfig44Enterprise_Network, + } as Mongocfgconfig44Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Network.$type, + Mongocfgconfig44Enterprise_Network +); + +const baseMongosconfig44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise", +}; + +export const Mongosconfig44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise" as const, + + encode( + message: Mongosconfig44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.net !== undefined) { + Mongosconfig44Enterprise_Network.encode( + message.net, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfig44Enterprise, + } as Mongosconfig44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.net = Mongosconfig44Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig44Enterprise { + const message = { + ...baseMongosconfig44Enterprise, + } as Mongosconfig44Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig44Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig44Enterprise): unknown { + const obj: any = {}; + message.net !== undefined && + (obj.net = message.net + ? Mongosconfig44Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig44Enterprise { + const message = { + ...baseMongosconfig44Enterprise, + } as Mongosconfig44Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig44Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig44Enterprise.$type, + Mongosconfig44Enterprise +); + +const baseMongosconfig44Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise.Network", +}; + +export const Mongosconfig44Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise.Network" as const, + + encode( + message: Mongosconfig44Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig44Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfig44Enterprise_Network, + } as Mongosconfig44Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig44Enterprise_Network { + const message = { + ...baseMongosconfig44Enterprise_Network, + } as Mongosconfig44Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig44Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongosconfig44Enterprise_Network { + const message = { + ...baseMongosconfig44Enterprise_Network, + } as Mongosconfig44Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig44Enterprise_Network.$type, + Mongosconfig44Enterprise_Network +); + +const baseMongodconfigset44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet4_4_enterprise", +}; + +export const Mongodconfigset44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet4_4_enterprise" as const, + + encode( + message: Mongodconfigset44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongodconfig44Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongodconfig44Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongodconfig44Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfigset44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfigset44Enterprise, + } as Mongodconfigset44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfigset44Enterprise { + const message = { + ...baseMongodconfigset44Enterprise, + } as Mongodconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig44Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig44Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig44Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongodconfigset44Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongodconfig44Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongodconfig44Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongodconfig44Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfigset44Enterprise { + const message = { + ...baseMongodconfigset44Enterprise, + } as Mongodconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig44Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
Mongodconfig44Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig44Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfigset44Enterprise.$type, + Mongodconfigset44Enterprise +); + +const baseMongocfgconfigset44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet4_4_enterprise", +}; + +export const Mongocfgconfigset44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet4_4_enterprise" as const, + + encode( + message: Mongocfgconfigset44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongocfgconfig44Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongocfgconfig44Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongocfgconfig44Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfigset44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfigset44Enterprise, + } as Mongocfgconfigset44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfigset44Enterprise { + const message = { + ...baseMongocfgconfigset44Enterprise, + } as Mongocfgconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig44Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig44Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig44Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfigset44Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongocfgconfig44Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongocfgconfig44Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongocfgconfig44Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfigset44Enterprise { + const message = { + ...baseMongocfgconfigset44Enterprise, + } as Mongocfgconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig44Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig44Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig44Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfigset44Enterprise.$type, + Mongocfgconfigset44Enterprise +); + +const baseMongosconfigset44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet4_4_enterprise", +}; + +export const Mongosconfigset44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet4_4_enterprise" as const, + + encode( + message: Mongosconfigset44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongosconfig44Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongosconfig44Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongosconfig44Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfigset44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfigset44Enterprise, + } as Mongosconfigset44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfigset44Enterprise { + const message = { + ...baseMongosconfigset44Enterprise, + } as Mongosconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig44Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig44Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig44Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongosconfigset44Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? 
Mongosconfig44Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongosconfig44Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongosconfig44Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfigset44Enterprise { + const message = { + ...baseMongosconfigset44Enterprise, + } as Mongosconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig44Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig44Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig44Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfigset44Enterprise.$type, + Mongosconfigset44Enterprise +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts new file mode 100644 index 00000000..770cc57c --- /dev/null +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts @@ -0,0 +1,2927 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.mongodb.v1.config"; + +/** + * Configuration of a mongod daemon. Supported options are a limited subset of all + * options described in [MongoDB documentation](https://docs.mongodb.com/v5.0/reference/configuration-options/). + */ +export interface Mongodconfig50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise"; + /** `storage` section of mongod configuration. */ + storage?: Mongodconfig50Enterprise_Storage; + /** `operationProfiling` section of mongod configuration. */ + operationProfiling?: Mongodconfig50Enterprise_OperationProfiling; + /** `net` section of mongod configuration. */ + net?: Mongodconfig50Enterprise_Network; + /** `security` section of mongod configuration. */ + security?: Mongodconfig50Enterprise_Security; + /** `AuditLog` section of mongod configuration. */ + auditLog?: Mongodconfig50Enterprise_AuditLog; + /** `SetParameter` section of mongod configuration. 
*/ + setParameter?: Mongodconfig50Enterprise_SetParameter; +} + +export interface Mongodconfig50Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongodconfig50Enterprise_Storage_WiredTiger; + /** Configuration of the MongoDB [journal](https://docs.mongodb.com/v5.0/reference/glossary/#term-journal). */ + journal?: Mongodconfig50Enterprise_Storage_Journal; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongodconfig50Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + /** Collection configuration for WiredTiger. */ + collectionConfig?: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; +} + +export interface Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.CollectionConfig"; + /** Default type of compression to use for collection data. */ + blockCompressor: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor; +} + +export enum Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + COMPRESSOR_UNSPECIFIED = 0, + /** NONE - No compression. */ + NONE = 1, + /** SNAPPY - The [Snappy](https://docs.mongodb.com/v5.0/reference/glossary/#term-snappy) compression. */ + SNAPPY = 2, + /** ZLIB - The [zlib](https://docs.mongodb.com/v5.0/reference/glossary/#term-zlib) compression. 
*/ + ZLIB = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object: any +): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + switch (object) { + case 0: + case "COMPRESSOR_UNSPECIFIED": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED; + case 1: + case "NONE": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE; + case 2: + case "SNAPPY": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY; + case 3: + case "ZLIB": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.UNRECOGNIZED; + } +} + +export function mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + object: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor +): string { + switch (object) { + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED: + return "COMPRESSOR_UNSPECIFIED"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE: + return "NONE"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY: + return "SNAPPY"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: + return "ZLIB"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig50Enterprise_Storage_Journal { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.Journal"; + /** + * Commit interval between journal operations, in milliseconds. + * Default: 100. + */ + commitInterval?: number; +} + +export interface Mongodconfig50Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongodconfig50Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. + */ + slowOpThreshold?: number; +} + +export enum Mongodconfig50Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. 
*/ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig50Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongodconfig50Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongodconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongodconfig50Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongodconfig50Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongodconfig50Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig50Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongodconfig50Enterprise_OperationProfiling_ModeToJSON( + object: Mongodconfig50Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongodconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongodconfig50Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongodconfig50Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongodconfig50Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig50Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongod will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfig50Enterprise_Security { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security"; + /** If encryption at rest should be enabled or not */ + enableEncryption?: boolean; + /** `kmip` section of mongod security config */ + kmip?: Mongodconfig50Enterprise_Security_KMIP; +} + +export interface Mongodconfig50Enterprise_Security_KMIP { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security.KMIP"; + /** KMIP server name */ + serverName: string; + /** KMIP server port */ + port?: number; + /** KMIP Server CA */ + serverCa: string; + /** KMIP client certificate + private key (unencrypted) */ + clientCertificate: string; + /** KMIP Key identifier (if any) */ + keyIdentifier: string; +} + +export interface Mongodconfig50Enterprise_AuditLog { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.AuditLog"; + /** Audit filter */ + filter: string; + /** Allows runtime configuration of audit filter and auditAuthorizationSuccess */ + runtimeConfiguration?: boolean; +} + +export interface Mongodconfig50Enterprise_SetParameter { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.SetParameter"; + /** Enables the auditing of authorization successes */ + auditAuthorizationSuccess?: boolean; +} + +export interface Mongocfgconfig50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise"; + /** `storage` section of mongocfg configuration. */ + storage?: Mongocfgconfig50Enterprise_Storage; + /** `operationProfiling` section of mongocfg configuration. */ + operationProfiling?: Mongocfgconfig50Enterprise_OperationProfiling; + /** `net` section of mongocfg configuration. */ + net?: Mongocfgconfig50Enterprise_Network; +} + +export interface Mongocfgconfig50Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. 
*/ + wiredTiger?: Mongocfgconfig50Enterprise_Storage_WiredTiger; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongocfgconfig50Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; +} + +export interface Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongocfgconfig50Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongocfgconfig50Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. For details see [MongoDB documentation](https://docs.mongodb.com/v5.0/reference/configuration-options/#operationProfiling.slowOpThresholdMs). + */ + slowOpThreshold?: number; +} + +export enum Mongocfgconfig50Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongocfgconfig50Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongocfgconfig50Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongocfgconfig50Enterprise_OperationProfiling_ModeToJSON( + object: Mongocfgconfig50Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongocfgconfig50Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongocfg will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongosconfig50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise"; + /** Network settings for mongos. 
*/ + net?: Mongosconfig50Enterprise_Network; +} + +export interface Mongosconfig50Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongos will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfigset50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet5_0_enterprise"; + /** + * Effective mongod settings for a MongoDB 5.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongodconfig50Enterprise; + /** User-defined mongod settings for a MongoDB 5.0 cluster. */ + userConfig?: Mongodconfig50Enterprise; + /** Default mongod configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongodconfig50Enterprise; +} + +export interface Mongocfgconfigset50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet5_0_enterprise"; + /** + * Effective mongocfg settings for a MongoDB 5.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongocfgconfig50Enterprise; + /** User-defined mongocfg settings for a MongoDB 5.0 cluster. */ + userConfig?: Mongocfgconfig50Enterprise; + /** Default mongocfg configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongocfgconfig50Enterprise; +} + +export interface Mongosconfigset50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet5_0_enterprise"; + /** + * Effective mongos settings for a MongoDB 5.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongosconfig50Enterprise; + /** User-defined mongos settings for a MongoDB 5.0 cluster. */ + userConfig?: Mongosconfig50Enterprise; + /** Default mongos configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongosconfig50Enterprise; +} + +const baseMongodconfig50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise", +}; + +export const Mongodconfig50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise" as const, + + encode( + message: Mongodconfig50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongodconfig50Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongodconfig50Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongodconfig50Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.security !== undefined) { + Mongodconfig50Enterprise_Security.encode( + message.security, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.auditLog !== undefined) { + Mongodconfig50Enterprise_AuditLog.encode( + message.auditLog, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.setParameter !== undefined) { + Mongodconfig50Enterprise_SetParameter.encode( + message.setParameter, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
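/*
 * Editorial sketch (not part of the generated file): per the doc comments above,
 * a *ConfigSet5_0_enterprise message reports `effectiveConfig` as the combination
 * of `userConfig` and `defaultConfig`, so callers typically only read it. The
 * `configSet` value below is a hypothetical Mongodconfigset50Enterprise obtained
 * elsewhere (for example from a cluster Get response); the field path follows the
 * interfaces declared above.
 *
 *   const cacheSizeGb =
 *     configSet.effectiveConfig?.storage?.wiredTiger?.engineConfig?.cacheSizeGb;
 */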
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise, + } as Mongodconfig50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongodconfig50Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongodconfig50Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongodconfig50Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.security = Mongodconfig50Enterprise_Security.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.auditLog = Mongodconfig50Enterprise_AuditLog.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.setParameter = Mongodconfig50Enterprise_SetParameter.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise { + const message = { + ...baseMongodconfig50Enterprise, + } as Mongodconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig50Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig50Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig50Enterprise_Network.fromJSON(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig50Enterprise_Security.fromJSON(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig50Enterprise_AuditLog.fromJSON(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig50Enterprise_SetParameter.fromJSON(object.setParameter) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongodconfig50Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongodconfig50Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongodconfig50Enterprise_Network.toJSON(message.net) + : undefined); + message.security !== undefined && + (obj.security = message.security + ? Mongodconfig50Enterprise_Security.toJSON(message.security) + : undefined); + message.auditLog !== undefined && + (obj.auditLog = message.auditLog + ? Mongodconfig50Enterprise_AuditLog.toJSON(message.auditLog) + : undefined); + message.setParameter !== undefined && + (obj.setParameter = message.setParameter + ? Mongodconfig50Enterprise_SetParameter.toJSON(message.setParameter) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig50Enterprise { + const message = { + ...baseMongodconfig50Enterprise, + } as Mongodconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? 
Mongodconfig50Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig50Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig50Enterprise_Network.fromPartial(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig50Enterprise_Security.fromPartial(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig50Enterprise_AuditLog.fromPartial(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig50Enterprise_SetParameter.fromPartial(object.setParameter) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise.$type, + Mongodconfig50Enterprise +); + +const baseMongodconfig50Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage", +}; + +export const Mongodconfig50Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage" as const, + + encode( + message: Mongodconfig50Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongodconfig50Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.journal !== undefined) { + Mongodconfig50Enterprise_Storage_Journal.encode( + message.journal, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage, + } as Mongodconfig50Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongodconfig50Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.journal = Mongodconfig50Enterprise_Storage_Journal.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Storage { + const message = { + ...baseMongodconfig50Enterprise_Storage, + } as Mongodconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig50Enterprise_Storage_Journal.fromJSON(object.journal) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongodconfig50Enterprise_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + message.journal !== undefined && + (obj.journal = message.journal + ? 
Mongodconfig50Enterprise_Storage_Journal.toJSON(message.journal) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Storage { + const message = { + ...baseMongodconfig50Enterprise_Storage, + } as Mongodconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig50Enterprise_Storage_Journal.fromPartial(object.journal) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage.$type, + Mongodconfig50Enterprise_Storage +); + +const baseMongodconfig50Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger", +}; + +export const Mongodconfig50Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.collectionConfig !== undefined) { + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.encode( + message.collectionConfig, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger, + } as Mongodconfig50Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.collectionConfig = + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger, + } as Mongodconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.fromJSON( + object.collectionConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + message.collectionConfig !== undefined && + (obj.collectionConfig = message.collectionConfig + ? 
Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.toJSON( + message.collectionConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger, + } as Mongodconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.fromPartial( + object.collectionConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_WiredTiger.$type, + Mongodconfig50Enterprise_Storage_WiredTiger +); + +const baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
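/*
 * Usage sketch (illustrative, not generated code): optional numeric settings such
 * as cacheSizeGb are plain `number | undefined` on the TypeScript side; encode()
 * above only wraps them into google.protobuf.DoubleValue on the wire. A config
 * can therefore be built and serialized like this (the value 2 is just an
 * example):
 *
 *   const engineConfig =
 *     Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromPartial({
 *       cacheSizeGb: 2,
 *     });
 *   const bytes =
 *     Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.encode(
 *       engineConfig
 *     ).finish();
 */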
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig: object = + { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.CollectionConfig", + blockCompressor: 0, + }; + +export const Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.CollectionConfig" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.blockCompressor !== 0) { + writer.uint32(8).int32(message.blockCompressor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blockCompressor = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = + object.blockCompressor !== undefined && object.blockCompressor !== null + ? mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object.blockCompressor + ) + : 0; + return message; + }, + + toJSON( + message: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig + ): unknown { + const obj: any = {}; + message.blockCompressor !== undefined && + (obj.blockCompressor = + mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + message.blockCompressor + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = object.blockCompressor ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.$type, + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig +); + +const baseMongodconfig50Enterprise_Storage_Journal: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.Journal", +}; + +export const Mongodconfig50Enterprise_Storage_Journal = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.Journal" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_Journal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.commitInterval !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.commitInterval! 
}, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_Journal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_Journal, + } as Mongodconfig50Enterprise_Storage_Journal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.commitInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig50Enterprise_Storage_Journal, + } as Mongodconfig50Enterprise_Storage_Journal; + message.commitInterval = + object.commitInterval !== undefined && object.commitInterval !== null + ? Number(object.commitInterval) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Storage_Journal): unknown { + const obj: any = {}; + message.commitInterval !== undefined && + (obj.commitInterval = message.commitInterval); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig50Enterprise_Storage_Journal, + } as Mongodconfig50Enterprise_Storage_Journal; + message.commitInterval = object.commitInterval ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_Journal.$type, + Mongodconfig50Enterprise_Storage_Journal +); + +const baseMongodconfig50Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongodconfig50Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.OperationProfiling" as const, + + encode( + message: Mongodconfig50Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_OperationProfiling, + } as Mongodconfig50Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig50Enterprise_OperationProfiling, + } as Mongodconfig50Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? 
mongodconfig50Enterprise_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongodconfig50Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig50Enterprise_OperationProfiling, + } as Mongodconfig50Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_OperationProfiling.$type, + Mongodconfig50Enterprise_OperationProfiling +); + +const baseMongodconfig50Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Network", +}; + +export const Mongodconfig50Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Network" as const, + + encode( + message: Mongodconfig50Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Network, + } as Mongodconfig50Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Network { + const message = { + ...baseMongodconfig50Enterprise_Network, + } as Mongodconfig50Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Network { + const message = { + ...baseMongodconfig50Enterprise_Network, + } as Mongodconfig50Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Network.$type, + Mongodconfig50Enterprise_Network +); + +const baseMongodconfig50Enterprise_Security: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security", +}; + +export const Mongodconfig50Enterprise_Security = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security" as const, + + encode( + message: Mongodconfig50Enterprise_Security, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enableEncryption !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableEncryption!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.kmip !== undefined) { + Mongodconfig50Enterprise_Security_KMIP.encode( + message.kmip, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Security { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Security, + } as Mongodconfig50Enterprise_Security; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enableEncryption = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.kmip = Mongodconfig50Enterprise_Security_KMIP.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Security { + const message = { + ...baseMongodconfig50Enterprise_Security, + } as Mongodconfig50Enterprise_Security; + message.enableEncryption = + object.enableEncryption !== undefined && object.enableEncryption !== null + ? Boolean(object.enableEncryption) + : undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig50Enterprise_Security_KMIP.fromJSON(object.kmip) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Security): unknown { + const obj: any = {}; + message.enableEncryption !== undefined && + (obj.enableEncryption = message.enableEncryption); + message.kmip !== undefined && + (obj.kmip = message.kmip + ? Mongodconfig50Enterprise_Security_KMIP.toJSON(message.kmip) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Security { + const message = { + ...baseMongodconfig50Enterprise_Security, + } as Mongodconfig50Enterprise_Security; + message.enableEncryption = object.enableEncryption ?? undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? 
Mongodconfig50Enterprise_Security_KMIP.fromPartial(object.kmip) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Security.$type, + Mongodconfig50Enterprise_Security +); + +const baseMongodconfig50Enterprise_Security_KMIP: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security.KMIP", + serverName: "", + serverCa: "", + clientCertificate: "", + keyIdentifier: "", +}; + +export const Mongodconfig50Enterprise_Security_KMIP = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security.KMIP" as const, + + encode( + message: Mongodconfig50Enterprise_Security_KMIP, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serverName !== "") { + writer.uint32(10).string(message.serverName); + } + if (message.port !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.port! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.serverCa !== "") { + writer.uint32(26).string(message.serverCa); + } + if (message.clientCertificate !== "") { + writer.uint32(34).string(message.clientCertificate); + } + if (message.keyIdentifier !== "") { + writer.uint32(42).string(message.keyIdentifier); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Security_KMIP { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Security_KMIP, + } as Mongodconfig50Enterprise_Security_KMIP; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serverName = reader.string(); + break; + case 2: + message.port = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.serverCa = reader.string(); + break; + case 4: + message.clientCertificate = reader.string(); + break; + case 5: + message.keyIdentifier = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig50Enterprise_Security_KMIP, + } as Mongodconfig50Enterprise_Security_KMIP; + message.serverName = + object.serverName !== undefined && object.serverName !== null + ? String(object.serverName) + : ""; + message.port = + object.port !== undefined && object.port !== null + ? Number(object.port) + : undefined; + message.serverCa = + object.serverCa !== undefined && object.serverCa !== null + ? String(object.serverCa) + : ""; + message.clientCertificate = + object.clientCertificate !== undefined && + object.clientCertificate !== null + ? String(object.clientCertificate) + : ""; + message.keyIdentifier = + object.keyIdentifier !== undefined && object.keyIdentifier !== null + ? 
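/*
 * Illustrative sketch (not generated code): the Security section combines the
 * `enableEncryption` flag with an optional KMIP block whose fields are listed in
 * Mongodconfig50Enterprise_Security_KMIP above. All values here are made-up
 * placeholders:
 *
 *   const security = Mongodconfig50Enterprise_Security.fromPartial({
 *     enableEncryption: true,
 *     kmip: {
 *       serverName: "kmip.example.com",
 *       port: 5696,
 *       serverCa: "<PEM-encoded CA>",
 *       clientCertificate: "<PEM-encoded client certificate + key>",
 *       keyIdentifier: "example-key-id",
 *     },
 *   });
 */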
String(object.keyIdentifier) + : ""; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Security_KMIP): unknown { + const obj: any = {}; + message.serverName !== undefined && (obj.serverName = message.serverName); + message.port !== undefined && (obj.port = message.port); + message.serverCa !== undefined && (obj.serverCa = message.serverCa); + message.clientCertificate !== undefined && + (obj.clientCertificate = message.clientCertificate); + message.keyIdentifier !== undefined && + (obj.keyIdentifier = message.keyIdentifier); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig50Enterprise_Security_KMIP, + } as Mongodconfig50Enterprise_Security_KMIP; + message.serverName = object.serverName ?? ""; + message.port = object.port ?? undefined; + message.serverCa = object.serverCa ?? ""; + message.clientCertificate = object.clientCertificate ?? ""; + message.keyIdentifier = object.keyIdentifier ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Security_KMIP.$type, + Mongodconfig50Enterprise_Security_KMIP +); + +const baseMongodconfig50Enterprise_AuditLog: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.AuditLog", + filter: "", +}; + +export const Mongodconfig50Enterprise_AuditLog = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.AuditLog" as const, + + encode( + message: Mongodconfig50Enterprise_AuditLog, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.filter !== "") { + writer.uint32(10).string(message.filter); + } + if (message.runtimeConfiguration !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.runtimeConfiguration!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_AuditLog { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_AuditLog, + } as Mongodconfig50Enterprise_AuditLog; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filter = reader.string(); + break; + case 2: + message.runtimeConfiguration = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_AuditLog { + const message = { + ...baseMongodconfig50Enterprise_AuditLog, + } as Mongodconfig50Enterprise_AuditLog; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.runtimeConfiguration = + object.runtimeConfiguration !== undefined && + object.runtimeConfiguration !== null + ? 
Boolean(object.runtimeConfiguration) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_AuditLog): unknown { + const obj: any = {}; + message.filter !== undefined && (obj.filter = message.filter); + message.runtimeConfiguration !== undefined && + (obj.runtimeConfiguration = message.runtimeConfiguration); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_AuditLog { + const message = { + ...baseMongodconfig50Enterprise_AuditLog, + } as Mongodconfig50Enterprise_AuditLog; + message.filter = object.filter ?? ""; + message.runtimeConfiguration = object.runtimeConfiguration ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_AuditLog.$type, + Mongodconfig50Enterprise_AuditLog +); + +const baseMongodconfig50Enterprise_SetParameter: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.SetParameter", +}; + +export const Mongodconfig50Enterprise_SetParameter = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.SetParameter" as const, + + encode( + message: Mongodconfig50Enterprise_SetParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.auditAuthorizationSuccess !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.auditAuthorizationSuccess!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_SetParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_SetParameter, + } as Mongodconfig50Enterprise_SetParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.auditAuthorizationSuccess = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_SetParameter { + const message = { + ...baseMongodconfig50Enterprise_SetParameter, + } as Mongodconfig50Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess !== undefined && + object.auditAuthorizationSuccess !== null + ? Boolean(object.auditAuthorizationSuccess) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_SetParameter): unknown { + const obj: any = {}; + message.auditAuthorizationSuccess !== undefined && + (obj.auditAuthorizationSuccess = message.auditAuthorizationSuccess); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_SetParameter { + const message = { + ...baseMongodconfig50Enterprise_SetParameter, + } as Mongodconfig50Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess ?? 
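/*
 * Illustrative sketch (not generated code): the enterprise audit settings are
 * split between the AuditLog and SetParameter sections of
 * Mongodconfig50Enterprise, so enabling auditing of authorization successes
 * together with an audit filter could look like this (the filter string is a
 * made-up example):
 *
 *   const mongodConfig = Mongodconfig50Enterprise.fromPartial({
 *     auditLog: { filter: '{ atype: "authenticate" }' },
 *     setParameter: { auditAuthorizationSuccess: true },
 *   });
 */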
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_SetParameter.$type, + Mongodconfig50Enterprise_SetParameter +); + +const baseMongocfgconfig50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise", +}; + +export const Mongocfgconfig50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise" as const, + + encode( + message: Mongocfgconfig50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongocfgconfig50Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongocfgconfig50Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongocfgconfig50Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise, + } as Mongocfgconfig50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongocfgconfig50Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongocfgconfig50Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongocfgconfig50Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise { + const message = { + ...baseMongocfgconfig50Enterprise, + } as Mongocfgconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig50Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig50Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig50Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongocfgconfig50Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongocfgconfig50Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongocfgconfig50Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig50Enterprise { + const message = { + ...baseMongocfgconfig50Enterprise, + } as Mongocfgconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? 
Mongocfgconfig50Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig50Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig50Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise.$type, + Mongocfgconfig50Enterprise +); + +const baseMongocfgconfig50Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage", +}; + +export const Mongocfgconfig50Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage" as const, + + encode( + message: Mongocfgconfig50Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongocfgconfig50Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Storage, + } as Mongocfgconfig50Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongocfgconfig50Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_Storage { + const message = { + ...baseMongocfgconfig50Enterprise_Storage, + } as Mongocfgconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig50Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongocfgconfig50Enterprise_Storage_WiredTiger.toJSON( + message.wiredTiger + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig50Enterprise_Storage { + const message = { + ...baseMongocfgconfig50Enterprise_Storage, + } as Mongocfgconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? 
Mongocfgconfig50Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Storage.$type, + Mongocfgconfig50Enterprise_Storage +); + +const baseMongocfgconfig50Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger", +}; + +export const Mongocfgconfig50Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongocfgconfig50Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? 
Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Storage_WiredTiger.$type, + Mongocfgconfig50Enterprise_Storage_WiredTiger +); + +const baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongocfgconfig50Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongocfgconfig50Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.OperationProfiling" as const, + + encode( + message: Mongocfgconfig50Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_OperationProfiling, + } as Mongocfgconfig50Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig50Enterprise_OperationProfiling, + } as Mongocfgconfig50Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? mongocfgconfig50Enterprise_OperationProfiling_ModeFromJSON( + object.mode + ) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongocfgconfig50Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig50Enterprise_OperationProfiling, + } as Mongocfgconfig50Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_OperationProfiling.$type, + Mongocfgconfig50Enterprise_OperationProfiling +); + +const baseMongocfgconfig50Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Network", +}; + +export const Mongocfgconfig50Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Network" as const, + + encode( + message: Mongocfgconfig50Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Network, + } as Mongocfgconfig50Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_Network { + const message = { + ...baseMongocfgconfig50Enterprise_Network, + } as Mongocfgconfig50Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig50Enterprise_Network { + const message = { + ...baseMongocfgconfig50Enterprise_Network, + } as Mongocfgconfig50Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Network.$type, + Mongocfgconfig50Enterprise_Network +); + +const baseMongosconfig50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise", +}; + +export const Mongosconfig50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise" as const, + + encode( + message: Mongosconfig50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.net !== undefined) { + Mongosconfig50Enterprise_Network.encode( + message.net, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfig50Enterprise, + } as Mongosconfig50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.net = Mongosconfig50Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig50Enterprise { + const message = { + ...baseMongosconfig50Enterprise, + } as Mongosconfig50Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig50Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig50Enterprise): unknown { + const obj: any = {}; + message.net !== undefined && + (obj.net = message.net + ? Mongosconfig50Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig50Enterprise { + const message = { + ...baseMongosconfig50Enterprise, + } as Mongosconfig50Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig50Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig50Enterprise.$type, + Mongosconfig50Enterprise +); + +const baseMongosconfig50Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise.Network", +}; + +export const Mongosconfig50Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise.Network" as const, + + encode( + message: Mongosconfig50Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig50Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfig50Enterprise_Network, + } as Mongosconfig50Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig50Enterprise_Network { + const message = { + ...baseMongosconfig50Enterprise_Network, + } as Mongosconfig50Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig50Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongosconfig50Enterprise_Network { + const message = { + ...baseMongosconfig50Enterprise_Network, + } as Mongosconfig50Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig50Enterprise_Network.$type, + Mongosconfig50Enterprise_Network +); + +const baseMongodconfigset50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet5_0_enterprise", +}; + +export const Mongodconfigset50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet5_0_enterprise" as const, + + encode( + message: Mongodconfigset50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongodconfig50Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongodconfig50Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongodconfig50Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfigset50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfigset50Enterprise, + } as Mongodconfigset50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfigset50Enterprise { + const message = { + ...baseMongodconfigset50Enterprise, + } as Mongodconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig50Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig50Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig50Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongodconfigset50Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongodconfig50Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongodconfig50Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongodconfig50Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfigset50Enterprise { + const message = { + ...baseMongodconfigset50Enterprise, + } as Mongodconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig50Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
Mongodconfig50Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig50Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfigset50Enterprise.$type, + Mongodconfigset50Enterprise +); + +const baseMongocfgconfigset50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet5_0_enterprise", +}; + +export const Mongocfgconfigset50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet5_0_enterprise" as const, + + encode( + message: Mongocfgconfigset50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfigset50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfigset50Enterprise, + } as Mongocfgconfigset50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfigset50Enterprise { + const message = { + ...baseMongocfgconfigset50Enterprise, + } as Mongocfgconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfigset50Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongocfgconfig50Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongocfgconfig50Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongocfgconfig50Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfigset50Enterprise { + const message = { + ...baseMongocfgconfigset50Enterprise, + } as Mongocfgconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfigset50Enterprise.$type, + Mongocfgconfigset50Enterprise +); + +const baseMongosconfigset50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet5_0_enterprise", +}; + +export const Mongosconfigset50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet5_0_enterprise" as const, + + encode( + message: Mongosconfigset50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongosconfig50Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongosconfig50Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongosconfig50Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfigset50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfigset50Enterprise, + } as Mongosconfigset50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfigset50Enterprise { + const message = { + ...baseMongosconfigset50Enterprise, + } as Mongosconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig50Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig50Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig50Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongosconfigset50Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? 
Mongosconfig50Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongosconfig50Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongosconfig50Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial<I extends Exact<DeepPartial<Mongosconfigset50Enterprise>, I>>( + object: I + ): Mongosconfigset50Enterprise { + const message = { + ...baseMongosconfigset50Enterprise, + } as Mongosconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig50Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig50Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig50Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfigset50Enterprise.$type, + Mongosconfigset50Enterprise +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial<T> = T extends Builtin + ? T + : T extends Array<infer U> + ? Array<DeepPartial<U>> + : T extends ReadonlyArray<infer U> + ? ReadonlyArray<DeepPartial<U>> + : T extends {} + ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> } + : Partial<T>; + +type KeysOfUnion<T> = T extends T ? keyof T : never; +export type Exact<P, I extends P> = P extends Builtin + ? P + : P & { [K in keyof P]: Exact<P[K], I[K]> } & Record< + Exclude<keyof I, KeysOfUnion<P> | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts b/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts index 1443c308..f0babe72 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts @@ -7,8 +7,9 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; /** - * A MySQL backup. For more information, see - * the [documentation](/docs/managed-mysql/concepts/backup). + * An object that represents MySQL backup. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. */ export interface Backup { $type: "yandex.cloud.mdb.mysql.v1.Backup"; @@ -16,11 +17,11 @@ export interface Backup { id: string; /** ID of the folder that the backup belongs to. */ folderId: string; - /** Comment for API reference generated automatically. */ + /** Creation timestamp (the time when the backup operation was completed). */ createdAt?: Date; - /** ID of the MySQL cluster that the backup was created for. */ + /** ID of the cluster that the backup was created for. */ sourceClusterId: string; - /** Time when the backup operation was started. */ + /** Start timestamp (the time when the backup operation was started). */ startedAt?: Date; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts index ab5d8109..44a19d46 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts @@ -22,7 +22,8 @@ export interface GetBackupRequest { $type: "yandex.cloud.mdb.mysql.v1.GetBackupRequest"; /** * ID of the backup to return information about.
- * To get the backup ID, use a [ClusterService.ListBackups] request. + * + * To get this ID, make a [BackupService.List] request (lists all backups in a folder) or a [ClusterService.ListBackups] request (lists all backups for an existing cluster). */ backupId: string; } @@ -31,31 +32,34 @@ export interface ListBackupsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListBackupsRequest"; /** * ID of the folder to list backups in. - * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListBackupsResponse.next_page_token] that can be used to get the next page of results in the subsequent [BackupService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, Set [page_token] to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] returned by the previous [BackupService.List] request. */ pageToken: string; } export interface ListBackupsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListBackupsResponse"; - /** List of MySQL backups. */ + /** List of backups. */ backups: Backup[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value - * for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value for the [ListBackupsRequest.page_token] in the subsequent [BackupService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [BackupService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -292,13 +296,13 @@ export const ListBackupsResponse = { messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); -/** A set of methods for managing MySQL backups. */ +/** + * A set of methods for managing MySQL backups. + * + * See [the documentation](/docs/managed-mysql/operations/cluster-backups) for details. + */ export const BackupServiceService = { - /** - * Returns the specified MySQL backup. - * - * To get the list of available MySQL backups, make a [List] request. - */ + /** Retrieves information about the specified backup. 
*/ get: { path: "/yandex.cloud.mdb.mysql.v1.BackupService/Get", requestStream: false, @@ -310,7 +314,11 @@ export const BackupServiceService = { Buffer.from(Backup.encode(value).finish()), responseDeserialize: (value: Buffer) => Backup.decode(value), }, - /** Retrieves the list of MySQL backups available for the specified folder. */ + /** + * Retrieves the list of backups in a folder. + * + * To list backups for an existing cluster, make a [ClusterService.ListBackups] request. + */ list: { path: "/yandex.cloud.mdb.mysql.v1.BackupService/List", requestStream: false, @@ -325,22 +333,18 @@ export const BackupServiceService = { } as const; export interface BackupServiceServer extends UntypedServiceImplementation { + /** Retrieves information about the specified backup. */ + get: handleUnaryCall; /** - * Returns the specified MySQL backup. + * Retrieves the list of backups in a folder. * - * To get the list of available MySQL backups, make a [List] request. + * To list backups for an existing cluster, make a [ClusterService.ListBackups] request. */ - get: handleUnaryCall; - /** Retrieves the list of MySQL backups available for the specified folder. */ list: handleUnaryCall; } export interface BackupServiceClient extends Client { - /** - * Returns the specified MySQL backup. - * - * To get the list of available MySQL backups, make a [List] request. - */ + /** Retrieves information about the specified backup. */ get( request: GetBackupRequest, callback: (error: ServiceError | null, response: Backup) => void @@ -356,7 +360,11 @@ export interface BackupServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Backup) => void ): ClientUnaryCall; - /** Retrieves the list of MySQL backups available for the specified folder. */ + /** + * Retrieves the list of backups in a folder. + * + * To list backups for an existing cluster, make a [ClusterService.ListBackups] request. + */ list( request: ListBackupsRequest, callback: ( diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts index 9b57f717..73a796eb 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts @@ -14,64 +14,61 @@ import { Mysqlconfigset80 } from "../../../../../yandex/cloud/mdb/mysql/v1/confi export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; /** - * A MySQL cluster. For more information, see - * the [documentation](/docs/managed-mysql/concepts). + * An object that represents MySQL cluster. + * + * See [the documentation](/docs/managed-mysql/concepts) for details. */ export interface Cluster { $type: "yandex.cloud.mdb.mysql.v1.Cluster"; /** - * ID of the MySQL cluster. - * This ID is assigned by Managed Service for MySQL at creation time. + * ID of the cluster. + * + * This ID is assigned by Yandex Cloud at the time of creation. */ id: string; - /** ID of the folder that the MySQL cluster belongs to. */ + /** ID of the folder that the cluster belongs to. */ folderId: string; + /** Creation timestamp of the cluster. */ createdAt?: Date; - /** - * Name of the MySQL cluster. - * The name must be unique within the folder, comply with RFC 1035 - * and be 1-63 characters long. - */ + /** Name of the cluster. */ name: string; - /** Description of the MySQL cluster. 0-256 characters long. */ + /** Description of the cluster. */ description: string; - /** - * Custom labels for the MySQL cluster as `key:value` pairs. - * Maximum 64 per resource. 
- */ + /** Custom labels for the cluster as `key:value` pairs. */ labels: { [key: string]: string }; - /** Deployment environment of the MySQL cluster. */ + /** Deployment environment of the cluster. */ environment: Cluster_Environment; - /** Description of monitoring systems relevant to the MySQL cluster. */ + /** Monitoring systems data that is relevant to the cluster. */ monitoring: Monitoring[]; - /** Configuration of the MySQL cluster. */ + /** Configuration of the cluster. */ config?: ClusterConfig; /** ID of the network that the cluster belongs to. */ networkId: string; - /** Aggregated cluster health. */ + /** Aggregated health of the cluster. */ health: Cluster_Health; /** Current state of the cluster. */ status: Cluster_Status; - /** Maintenance window for the cluster. */ + /** Maintenance window settings for the cluster. */ maintenanceWindow?: MaintenanceWindow; /** Planned maintenance operation to be started for the cluster within the nearest [maintenance_window]. */ plannedOperation?: MaintenanceOperation; - /** User security groups */ + /** Effective list of security group IDs applied to the cluster. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; } export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** - * PRODUCTION - Stable environment with a conservative update policy: - * only hotfixes are applied during regular maintenance. + * PRODUCTION - Environment for stable versions of your apps. + * A conservative update policy is in effect: only bug fixes are applied during regular maintenance. */ PRODUCTION = 1, /** - * PRESTABLE - Environment with more aggressive update policy: new versions - * are rolled out irrespective of backward compatibility. + * PRESTABLE - Environment for testing, including the Managed Service for MySQL itself. + * This environment gets new features, improvements, and bug fixes in the first place, compared to the production environment. + * However, not every update ensures backward compatibility. */ PRESTABLE = 2, UNRECOGNIZED = -1, @@ -109,13 +106,13 @@ export function cluster_EnvironmentToJSON(object: Cluster_Environment): string { } export enum Cluster_Health { - /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + /** HEALTH_UNKNOWN - Health of the cluster is unknown ([Host.health] for every host in the cluster is `UNKNOWN`). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). */ + /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is `ALIVE`). */ ALIVE = 1, - /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ + /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is `DEAD`). */ DEAD = 2, - /** DEGRADED - Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). */ + /** DEGRADED - Cluster is degraded ([Host.health] for at least one host in the cluster is not `ALIVE`). */ DEGRADED = 3, UNRECOGNIZED = -1, } @@ -169,7 +166,7 @@ export enum Cluster_Status { UPDATING = 4, /** STOPPING - Cluster is stopping. */ STOPPING = 5, - /** STOPPED - Cluster stopped. */ + /** STOPPED - Cluster is stopped. */ STOPPED = 6, /** STARTING - Cluster is starting. 
*/ STARTING = 7, @@ -238,73 +235,74 @@ export interface Cluster_LabelsEntry { value: string; } +/** Cluster-related monitoring system data. */ export interface Monitoring { $type: "yandex.cloud.mdb.mysql.v1.Monitoring"; /** Name of the monitoring system. */ name: string; /** Description of the monitoring system. */ description: string; - /** Link to the monitoring system charts for the MySQL cluster. */ + /** Link to the monitoring system charts for the cluster. */ link: string; } export interface ClusterConfig { $type: "yandex.cloud.mdb.mysql.v1.ClusterConfig"; - /** Version of MySQL server software. */ + /** Version of MySQL used in the cluster. */ version: string; /** Configuration of a MySQL 5.7 server. */ mysqlConfig57?: Mysqlconfigset57 | undefined; /** Configuration of a MySQL 8.0 server. */ mysqlConfig80?: Mysqlconfigset80 | undefined; - /** Resources allocated to MySQL hosts. */ + /** Resource preset for the cluster hosts. */ resources?: Resources; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; - /** Access policy to DB */ + /** Access policy for external services. */ access?: Access; + /** Configuration of the performance diagnostics service. */ + performanceDiagnostics?: PerformanceDiagnostics; } export interface Host { $type: "yandex.cloud.mdb.mysql.v1.Host"; /** - * Name of the MySQL host. The host name is assigned by Managed Service for MySQL - * at creation time, and cannot be changed. 1-63 characters long. + * Name of the host. * - * The name is unique across all existing database hosts in Yandex.Cloud, - * as it defines the FQDN of the host. + * This name is assigned by Yandex Cloud at the time of creation. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; - /** - * ID of the MySQL host. The ID is assigned by Managed Service for MySQL - * at creation time. - */ + /** ID of the cluster the host belongs to. */ clusterId: string; - /** ID of the availability zone where the MySQL host resides. */ + /** ID of the availability zone where the host resides. */ zoneId: string; /** Resources allocated to the host. */ resources?: Resources; /** Role of the host in the cluster. */ role: Host_Role; - /** Status code of the aggregated health of the host. */ + /** Aggregated health of the host. */ health: Host_Health; - /** Services provided by the host. */ + /** List of services provided by the host. */ services: Service[]; /** ID of the subnet that the host belongs to. */ subnetId: string; - /** Flag showing public IP assignment status to this host. */ + /** Flag that shows if public IP address is assigned to the host so that the host can be accessed from the internet. */ assignPublicIp: boolean; /** Name of the host to be used as the replication source for cascading replication. */ replicationSource: string; - /** Host backup priority */ + /** Host backup priority. */ backupPriority: number; + /** Host master promotion priority. */ + priority: number; } export enum Host_Role { - /** ROLE_UNKNOWN - Role of the host in the cluster is unknown. */ + /** ROLE_UNKNOWN - Role of the host is unknown. */ ROLE_UNKNOWN = 0, - /** MASTER - Host is the master MySQL server in the cluster. */ + /** MASTER - Host is the master. */ MASTER = 1, - /** REPLICA - Host is a replica MySQL server in the cluster. */ + /** REPLICA - Host is a replica. 
*/ REPLICA = 2, UNRECOGNIZED = -1, } @@ -343,11 +341,11 @@ export function host_RoleToJSON(object: Host_Role): string { export enum Host_Health { /** HEALTH_UNKNOWN - Health of the host is unknown. */ HEALTH_UNKNOWN = 0, - /** ALIVE - The host is performing all its functions normally. */ + /** ALIVE - Host is performing all its functions normally. */ ALIVE = 1, - /** DEAD - The host is inoperable, and cannot perform any of its essential functions. */ + /** DEAD - Host is inoperable, and cannot perform any of its essential functions. */ DEAD = 2, - /** DEGRADED - The host is degraded, and can perform only some of its essential functions. */ + /** DEGRADED - Host is degraded, and can perform only some of its essential functions. */ DEGRADED = 3, UNRECOGNIZED = -1, } @@ -392,7 +390,7 @@ export interface Service { $type: "yandex.cloud.mdb.mysql.v1.Service"; /** Type of the service provided by the host. */ type: Service_Type; - /** Status code of server availability. */ + /** Aggregated health of the service. */ health: Service_Health; } @@ -430,11 +428,11 @@ export function service_TypeToJSON(object: Service_Type): string { } export enum Service_Health { - /** HEALTH_UNKNOWN - Health of the server is unknown. */ + /** HEALTH_UNKNOWN - Health of the service is unknown. */ HEALTH_UNKNOWN = 0, - /** ALIVE - The server is working normally. */ + /** ALIVE - The service is working normally. */ ALIVE = 1, - /** DEAD - The server is dead or unresponsive. */ + /** DEAD - The service is dead or unresponsive. */ DEAD = 2, UNRECOGNIZED = -1, } @@ -470,43 +468,56 @@ export function service_HealthToJSON(object: Service_Health): string { } } +/** Cluster resource preset. */ export interface Resources { $type: "yandex.cloud.mdb.mysql.v1.Resources"; /** - * ID of the preset for computational resources available to a host (CPU, memory etc.). - * All available presets are listed in the [documentation](/docs/managed-mysql/concepts/instance-types). + * ID of the resource preset that defines available computational resources (vCPU, RAM, etc.) for a cluster host. + * + * All available presets are listed in [the documentation](/docs/managed-mysql/concepts/instance-types). */ resourcePresetId: string; - /** Volume of the storage available to a host. */ + /** Volume of the storage (for each cluster host, in bytes). */ diskSize: number; /** - * Type of the storage environment for the host. + * Type of the storage. + * * Possible values: - * * network-ssd - network SSD drive, - * * local-ssd - local SSD storage. + * * `network-hdd` - standard network storage + * * `network-ssd` - fast network storage + * * `network-ssd-nonreplicated` - fast network nonreplicated storage + * * `local-ssd` - fast local storage. + * + * See [the documentation](/docs/managed-mysql/concepts/storage) for details. */ diskTypeId: string; } export interface Access { $type: "yandex.cloud.mdb.mysql.v1.Access"; - /** Allow access for DataLens */ + /** + * Allows access from DataLens. + * + * See [the documentation](/docs/managed-mysql/operations/datalens-connect) for details. + */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex.Cloud management console. + * Allows SQL queries to the cluster databases from Yandex Cloud management console. * - * See [SQL queries in the management console](/docs/managed-mysql/operations/web-sql-query) for more details. + * See [the documentation](/docs/managed-mysql/operations/web-sql-query) for details. */ webSql: boolean; + /** Allow access for DataTransfer. 
*/ + dataTransfer: boolean; } export interface PerformanceDiagnostics { $type: "yandex.cloud.mdb.mysql.v1.PerformanceDiagnostics"; - /** Configuration setting which enables/disables performance diagnostics service in cluster. */ + /** Flag that shows if performance statistics gathering is enabled for the cluster. */ enabled: boolean; - /** Interval (in seconds) for my_session sampling */ + /** Interval (in seconds) for `my_session` sampling. */ sessionsSamplingInterval: number; - /** Interval (in seconds) for my_statements sampling */ + /** Interval (in seconds) for `my_statements` sampling. */ statementsSamplingInterval: number; } @@ -1038,6 +1049,12 @@ export const ClusterConfig = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(42).fork()).ldelim(); } + if (message.performanceDiagnostics !== undefined) { + PerformanceDiagnostics.encode( + message.performanceDiagnostics, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -1072,6 +1089,12 @@ export const ClusterConfig = { case 5: message.access = Access.decode(reader, reader.uint32()); break; + case 7: + message.performanceDiagnostics = PerformanceDiagnostics.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1107,6 +1130,11 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) + : undefined; return message; }, @@ -1131,6 +1159,10 @@ export const ClusterConfig = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.performanceDiagnostics !== undefined && + (obj.performanceDiagnostics = message.performanceDiagnostics + ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) + : undefined); return obj; }, @@ -1160,6 +1192,11 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) + : undefined; return message; }, }; @@ -1177,6 +1214,7 @@ const baseHost: object = { assignPublicIp: false, replicationSource: "", backupPriority: 0, + priority: 0, }; export const Host = { @@ -1216,6 +1254,9 @@ export const Host = { if (message.backupPriority !== 0) { writer.uint32(88).int64(message.backupPriority); } + if (message.priority !== 0) { + writer.uint32(96).int64(message.priority); + } return writer; }, @@ -1260,6 +1301,9 @@ export const Host = { case 11: message.backupPriority = longToNumber(reader.int64() as Long); break; + case 12: + message.priority = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -1314,6 +1358,10 @@ export const Host = { object.backupPriority !== undefined && object.backupPriority !== null ? Number(object.backupPriority) : 0; + message.priority = + object.priority !== undefined && object.priority !== null + ? 
Number(object.priority) + : 0; return message; }, @@ -1343,6 +1391,8 @@ export const Host = { (obj.replicationSource = message.replicationSource); message.backupPriority !== undefined && (obj.backupPriority = Math.round(message.backupPriority)); + message.priority !== undefined && + (obj.priority = Math.round(message.priority)); return obj; }, @@ -1363,6 +1413,7 @@ export const Host = { message.assignPublicIp = object.assignPublicIp ?? false; message.replicationSource = object.replicationSource ?? ""; message.backupPriority = object.backupPriority ?? 0; + message.priority = object.priority ?? 0; return message; }, }; @@ -1537,6 +1588,7 @@ const baseAccess: object = { $type: "yandex.cloud.mdb.mysql.v1.Access", dataLens: false, webSql: false, + dataTransfer: false, }; export const Access = { @@ -1552,6 +1604,9 @@ export const Access = { if (message.webSql === true) { writer.uint32(16).bool(message.webSql); } + if (message.dataTransfer === true) { + writer.uint32(24).bool(message.dataTransfer); + } return writer; }, @@ -1568,6 +1623,9 @@ export const Access = { case 2: message.webSql = reader.bool(); break; + case 3: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1586,6 +1644,10 @@ export const Access = { object.webSql !== undefined && object.webSql !== null ? Boolean(object.webSql) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, @@ -1593,6 +1655,8 @@ export const Access = { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); message.webSql !== undefined && (obj.webSql = message.webSql); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, @@ -1600,6 +1664,7 @@ export const Access = { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; message.webSql = object.webSql ?? false; + message.dataTransfer = object.dataTransfer ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts index a709441a..d99fd4fd 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts @@ -20,6 +20,7 @@ import { Cluster_Environment, Resources, Access, + PerformanceDiagnostics, Cluster, Host, cluster_EnvironmentFromJSON, @@ -41,8 +42,9 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.GetClusterRequest"; /** - * ID of the MySQL cluster to return. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to return information about. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; } @@ -50,23 +52,26 @@ export interface GetClusterRequest { export interface ListClustersRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClustersRequest"; /** - * ID of the folder to list MySQL clusters in. - * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + * ID of the folder to list clusters in. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; /** - * The maximum number of results per page to return. 
If the number of available - * results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClustersResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by the previous [ClusterService.List] request. */ pageToken: string; /** - * A filter expression that filters resources listed in the response. + * A filter expression that selects clusters listed in the response. + * * The expression must specify: * 1. The field name. Currently you can only use filtering with the [Cluster.name] field. * 2. An `=` operator. @@ -77,45 +82,47 @@ export interface ListClustersRequest { export interface ListClustersResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClustersResponse"; - /** List of MySQL clusters. */ + /** List of clusters. */ clusters: Cluster[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value - * for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value for the [ListClustersRequest.page_token] in the subsequent [ClusterService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } export interface CreateClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.CreateClusterRequest"; - /** ID of the folder to create the MySQL cluster in. */ + /** + * ID of the folder to create the cluster in. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string; - /** Name of the MySQL cluster. The name must be unique within the folder. */ + /** Name of the cluster. The name must be unique within the folder. */ name: string; - /** Description of the MySQL cluster. */ + /** Description of the cluster. */ description: string; - /** - * Custom labels for the MySQL cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". - */ + /** Custom labels for the cluster as `key:value` pairs. */ labels: { [key: string]: string }; - /** Deployment environment of the MySQL cluster. */ + /** Deployment environment of the cluster. */ environment: Cluster_Environment; - /** Configuration and resources for hosts that should be created for the MySQL cluster. */ + /** Configuration of the cluster. 
*/ configSpec?: ConfigSpec; - /** Descriptions of databases to be created in the MySQL cluster. */ + /** Configuration of databases in the cluster. */ databaseSpecs: DatabaseSpec[]; - /** Descriptions of database users to be created in the MySQL cluster. */ + /** Configuration of database users in the cluster. */ userSpecs: UserSpec[]; - /** Individual configurations for hosts that should be created for the MySQL cluster. */ + /** Configuration of hosts in the cluster. */ hostSpecs: HostSpec[]; /** ID of the network to create the cluster in. */ networkId: string; - /** User security groups */ + /** List of security group IDs to apply to the cluster. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; } @@ -127,38 +134,38 @@ export interface CreateClusterRequest_LabelsEntry { export interface CreateClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.CreateClusterMetadata"; - /** ID of the MySQL cluster that is being created. */ + /** ID of the cluster that is being created. */ clusterId: string; } export interface UpdateClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.UpdateClusterRequest"; /** - * ID of the MySQL cluster to update. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to update. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the MySQL cluster should be updated. */ + /** Field mask that specifies which settings of the cluster should be updated. */ updateMask?: FieldMask; - /** New description of the MySQL cluster. */ + /** New description of the cluster. */ description: string; /** - * Custom labels for the MySQL cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * New set of custom labels for the cluster as `key:value` pairs. * - * The new set of labels will completely replace the old ones. To add a label, request the current - * set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + * This set will completely replace the current one. + * To add a label, request the current label set with the [ClusterService.Get] request, then send an [ClusterService.Update] request with the new label added to the current set. */ labels: { [key: string]: string }; - /** New configuration and resources for hosts in the cluster. */ + /** New configuration of the cluster. */ configSpec?: ConfigSpec; - /** New name for the cluster. */ + /** New name of the cluster. */ name: string; - /** New maintenance window settings for the cluster. */ + /** Configuration of a maintenance window in an MySQL cluster. */ maintenanceWindow?: MaintenanceWindow; - /** User security groups */ + /** New list of security group IDs to apply to the cluster. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; } @@ -170,72 +177,69 @@ export interface UpdateClusterRequest_LabelsEntry { export interface UpdateClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata"; - /** ID of the MySQL cluster that is being modified. */ + /** ID of the cluster that is being updated. 
*/ clusterId: string; } export interface DeleteClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterRequest"; /** - * ID of the MySQL cluster to delete. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; } export interface DeleteClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata"; - /** ID of the MySQL cluster that is being deleted. */ + /** ID of the cluster that is being deleted. */ clusterId: string; } export interface BackupClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.BackupClusterRequest"; /** - * ID of the MySQL cluster to back up. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to back up. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; } export interface BackupClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.BackupClusterMetadata"; - /** ID of the MySQL cluster that is being backed up. */ + /** ID of the cluster that is being backed up. */ clusterId: string; } export interface RestoreClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.RestoreClusterRequest"; /** - * ID of the backup to create a cluster from. - * To get the backup ID, use a [ClusterService.ListBackups] request. + * ID of the backup to restore from. + * + * To get this ID, make a [BackupService.List] request (lists all backups in a folder) or a [ClusterService.ListBackups] request (lists all backups for an existing cluster). */ backupId: string; /** Timestamp of the moment to which the MySQL cluster should be restored. */ time?: Date; - /** Name of the new MySQL cluster. The name must be unique within the folder. */ + /** Name of the new MySQL cluster the backup will be restored to. The name must be unique within the folder. */ name: string; - /** Description of the new MySQL cluster. */ + /** Description of the new cluster. */ description: string; - /** - * Custom labels for the MySQL cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". - */ + /** Custom labels for the new cluster as `key:value` pairs. */ labels: { [key: string]: string }; - /** Deployment environment of the new MySQL cluster. */ + /** Deployment environment for the new cluster. */ environment: Cluster_Environment; - /** Configuration for the MySQL cluster to be created. */ + /** Configuration of the new cluster. */ configSpec?: ConfigSpec; - /** - * Configurations for MySQL hosts that should be added - * to the cluster that is being created from the backup. - */ + /** Configuration of hosts in the new cluster. */ hostSpecs: HostSpec[]; - /** ID of the network to create the MySQL cluster in. */ + /** ID of the network to create the new cluster in. */ networkId: string; - /** ID of the folder to create the MySQL cluster in. */ + /** ID of the folder to create the new cluster in. */ folderId: string; - /** User security groups */ + /** List of security group IDs to apply to the new cluster. */ securityGroupIds: string[]; } @@ -247,7 +251,7 @@ export interface RestoreClusterRequest_LabelsEntry { export interface RestoreClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata"; - /** ID of the new MySQL cluster that is being created from a backup. */ + /** ID of the new cluster that is being created from a backup. */ clusterId: string; /** ID of the backup that is being used for creating a cluster. 
*/ backupId: string; @@ -255,25 +259,42 @@ export interface RestoreClusterMetadata { export interface StartClusterFailoverRequest { $type: "yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest"; - /** ID of MySQL cluster. */ + /** + * ID of the cluster to start failover for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** New master host. Switch to the most up-to-date replica if not provided. */ + /** + * Host name to switch master role to. + * If not provided, then the master role is switched to the most up-to-date replica host. + * + * To get this name, make a [ClusterService.ListHosts] request. + */ hostName: string; } export interface StartClusterFailoverMetadata { $type: "yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata"; - /** ID of the MySQL cluster being failovered. */ + /** ID of the cluster that is being failovered. */ clusterId: string; } export interface RescheduleMaintenanceRequest { $type: "yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest"; - /** ID of the MySQL cluster to reschedule the maintenance operation for. */ + /** + * ID of the cluster to reschedule the maintenance operation for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; /** The type of reschedule request. */ rescheduleType: RescheduleMaintenanceRequest_RescheduleType; - /** The time until which this maintenance operation should be delayed. The value should be ahead of the first time when the maintenance operation has been scheduled for no more than two weeks. The value can also point to the past moment of time if [reschedule_type.IMMEDIATE] reschedule type is chosen. */ + /** + * The time until which this maintenance operation should be delayed. + * The value should be ahead of the first time when the maintenance operation has been scheduled for no more than two weeks. + * The value can also point to the past moment of time if `IMMEDIATE` reschedule type is chosen. + */ delayedUntil?: Date; } @@ -328,18 +349,18 @@ export function rescheduleMaintenanceRequest_RescheduleTypeToJSON( } } -/** Rescheduled maintenance operation metadata. */ export interface RescheduleMaintenanceMetadata { $type: "yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata"; - /** Required. ID of the MySQL cluster. */ + /** ID of the cluster the maintenance operation is being rescheduled for. */ clusterId: string; - /** Required. The time until which this maintenance operation is to be delayed. */ + /** The time until which this maintenance operation is to be delayed. */ delayedUntil?: Date; } +/** A single log record. */ export interface LogRecord { $type: "yandex.cloud.mdb.mysql.v1.LogRecord"; - /** Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Timestamp of the log record. */ timestamp?: Date; /** Contents of the log record. */ message: { [key: string]: string }; @@ -354,33 +375,44 @@ export interface LogRecord_MessageEntry { export interface ListClusterLogsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest"; /** - * ID of the MySQL cluster to request logs for. - * To get the MySQL cluster ID use a [ClusterService.List] request. + * ID of the cluster to request logs for. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** * Columns from the logs table to request. - * If no columns are specified, entire log records are returned. + * If no columns are specified, complete log records are returned. 
*/ columnFilter: string[]; - /** Type of the service to request logs about. */ + /** The log type. */ serviceType: ListClusterLogsRequest_ServiceType; - /** Start timestamp for the logs request. */ + /** + * Start timestamp for the logs request. + * The logs in the response will be within [from_time] to [to_time] range. + */ fromTime?: Date; - /** End timestamp for the logs request. */ + /** + * End timestamp for the logs request. + * The logs in the response will be within [from_time] to [to_time] range. + */ toTime?: Date; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterLogsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListLogs] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by the previous [ClusterService.ListLogs] request. */ pageToken: string; - /** Always return `next_page_token`, even if current page is empty. */ + /** + * Option that controls the behavior of result pagination. + * If it is set to `true`, then [ListClusterLogsResponse.next_page_token] will always be returned, even if the current page is empty. + */ alwaysNextPageToken: boolean; } @@ -447,50 +479,64 @@ export interface ListClusterLogsResponse { /** Requested log records. */ logs: LogRecord[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. - * This value is interchangeable with `next_record_token` from StreamLogs method. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value for the [ListClusterLogsRequest.page_token] in the subsequent [ClusterService.ListLogs] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListLogs] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. + * + * This value is interchangeable with [StreamLogRecord.next_record_token] from [ClusterService.StreamLogs] method. */ nextPageToken: string; } +/** A single log record in the logs stream. */ export interface StreamLogRecord { $type: "yandex.cloud.mdb.mysql.v1.StreamLogRecord"; /** One of the requested log records. */ record?: LogRecord; /** - * This token allows you to continue streaming logs starting from the exact - * same record. To continue streaming, specify value of `next_record_token` - * as value for `record_token` parameter in the next StreamLogs request. 
- * This value is interchangeable with `next_page_token` from ListLogs method. + * The token that can be used to continue streaming logs starting from the exact same record. + * To continue streaming, specify value of [next_record_token] as the [StreamClusterLogsRequest.record_token] value in the next [ClusterService.StreamLogs] request. + * + * This value is interchangeable with [ListClusterLogsResponse.next_page_token] from [ClusterService.ListLogs] method. */ nextRecordToken: string; } export interface StreamClusterLogsRequest { $type: "yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest"; - /** Required. ID of the MySQL cluster. */ + /** + * ID of the cluster to stream logs for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** Columns from logs table to get in the response. */ + /** + * Columns from the logs table to request. + * If no columns are specified, complete log records are returned. + */ columnFilter: string[]; + /** The log type. */ serviceType: StreamClusterLogsRequest_ServiceType; /** Start timestamp for the logs request. */ fromTime?: Date; /** * End timestamp for the logs request. - * If this field is not set, all existing logs will be sent and then the new ones as - * they appear. In essence it has 'tail -f' semantics. + * If this field is not set, all existing log records beginning from [from_time] will be returned first, and then the new records will be returned as they appear. + * + * In essence it has `tail -f` command semantics. */ toTime?: Date; /** - * Record token. Set `record_token` to the `next_record_token` returned by a previous StreamLogs - * request to start streaming from next log record. + * Record token that can be used to control logs streaming. + * + * Set [record_token] to the [StreamLogRecord.next_record_token], returned by the previous [ClusterService.StreamLogs] request to start streaming from the next log record. */ recordToken: string; /** - * A filter expression that filters resources listed in the response. + * A filter expression that selects clusters logs listed in the response. + * * The expression must specify: * 1. The field name. Currently filtering can be applied to the [LogRecord.logs.hostname] field. * 2. An `=` operator. @@ -560,30 +606,36 @@ export function streamClusterLogsRequest_ServiceTypeToJSON( export interface ListClusterOperationsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest"; - /** ID of the MySQL cluster to list operations for. */ + /** + * ID of the cluster to list operations for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterOperationsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListOperations] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. 
+ * + * To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous [ClusterService.ListOperations] request. */ pageToken: string; } export interface ListClusterOperationsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse"; - /** List of operations for the specified MySQL cluster. */ + /** List of operations in the cluster. */ operations: Operation[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value for the [ListClusterOperationsRequest.page_token] in the subsequent [ClusterService.ListOperations] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListOperations] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -591,32 +643,35 @@ export interface ListClusterOperationsResponse { export interface ListClusterBackupsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest"; /** - * ID of the MySQL cluster. - * To get the MySQL cluster ID use a [ClusterService.List] request. + * ID of the cluster to list backups for. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterBackupsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListBackups] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] returned by the previous [ClusterService.ListBackups] request. */ pageToken: string; } export interface ListClusterBackupsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse"; - /** List of MySQL backups. */ + /** List of the cluster backups. */ backups: Backup[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. 
+ * + * If the number of results is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value for the [ListClusterBackupsRequest.page_token] in the subsequent [ClusterService.ListBackups] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListBackups] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -624,32 +679,35 @@ export interface ListClusterBackupsResponse { export interface ListClusterHostsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest"; /** - * ID of the MySQL cluster. - * To get the MySQL cluster ID use a [ClusterService.List] request. + * ID of the cluster to list hosts for. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterHostsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListHosts] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by the previous [ClusterService.ListHosts] request. */ pageToken: string; } export interface ListClusterHostsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse"; - /** List of MySQL hosts. */ + /** List of hosts in the cluster. */ hosts: Host[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterHostsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value for the [ListClusterHostsRequest.page_token] in the subsequent [ClusterService.ListHosts] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListHosts] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -657,36 +715,42 @@ export interface ListClusterHostsResponse { export interface AddClusterHostsRequest { $type: "yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest"; /** - * ID of the MySQL cluster to add hosts to. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to add hosts to. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Configurations for MySQL hosts that should be added to the cluster. 
*/ + /** Configuration of the newly added hosts. */ hostSpecs: HostSpec[]; } export interface AddClusterHostsMetadata { $type: "yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata"; - /** ID of the MySQL cluster to which the hosts are being added. */ + /** ID of the cluster to which the hosts are being added. */ clusterId: string; - /** Names of hosts that are being added to the cluster. */ + /** Names of hosts that are being added. */ hostNames: string[]; } export interface DeleteClusterHostsRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest"; /** - * ID of the MySQL cluster to remove hosts from. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete hosts from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Names of hosts to delete. */ + /** + * Names of hosts to delete. + * + * To get these names, make a [ClusterService.ListHosts] request. + */ hostNames: string[]; } export interface DeleteClusterHostsMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata"; - /** ID of the MySQL cluster to remove hosts from. */ + /** ID of the cluster from which the hosts are being deleted. */ clusterId: string; /** Names of hosts that are being deleted. */ hostNames: string[]; @@ -694,43 +758,59 @@ export interface DeleteClusterHostsMetadata { export interface StartClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.StartClusterRequest"; - /** ID of the MySQL cluster to start. */ + /** + * ID of the cluster to start. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; } export interface StartClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.StartClusterMetadata"; - /** ID of the MySQL cluster being started. */ + /** ID of the cluster that is being started. */ clusterId: string; } export interface StopClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.StopClusterRequest"; - /** ID of the MySQL cluster to stop. */ + /** + * ID of the cluster to stop. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; } export interface StopClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.StopClusterMetadata"; - /** ID of the MySQL cluster being stopped. */ + /** ID of the cluster that is being stopped. */ clusterId: string; } export interface MoveClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.MoveClusterRequest"; - /** ID of the MySQL cluster to move. */ + /** + * ID of the cluster to move. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** ID of the destination folder. */ + /** + * ID of the destination folder. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ destinationFolderId: string; } export interface MoveClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.MoveClusterMetadata"; - /** ID of the MySQL cluster being moved. */ + /** ID of the cluster that is being moved. */ clusterId: string; /** ID of the source folder. */ sourceFolderId: string; - /** ID of the destnation folder. */ + /** ID of the destination folder. */ destinationFolderId: string; } @@ -747,9 +827,9 @@ export interface UpdateClusterHostsRequest { export interface UpdateClusterHostsMetadata { $type: "yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata"; - /** ID of the MySQL cluster to modify hosts in. */ + /** ID of the cluster in which the hosts are being updated. */ clusterId: string; - /** Names of hosts that are being modified. 
*/ + /** Names of hosts that are being updated. */ hostNames: string[]; } @@ -757,71 +837,81 @@ export interface UpdateHostSpec { $type: "yandex.cloud.mdb.mysql.v1.UpdateHostSpec"; /** * Name of the host to update. - * To get the MySQL host name, use a [ClusterService.ListHosts] request. + * To get a MySQL host name, use a [ClusterService.ListHosts] request. */ hostName: string; /** * [Host.name] of the host to be used as the replication source (for cascading replication). - * To get the MySQL host name, use a [ClusterService.ListHosts] request. + * To get a MySQL host name, use a [ClusterService.ListHosts] request. */ replicationSource: string; - /** Field mask that specifies which fields of the MySQL host should be updated. */ + /** Field mask that specifies which settings of the MySQL host should be updated. */ updateMask?: FieldMask; - /** Host backup priority */ + /** Host backup priority. */ backupPriority: number; /** Whether the host should get a public IP address on creation. */ assignPublicIp: boolean; + /** Host master promotion priority. */ + priority: number; } export interface HostSpec { $type: "yandex.cloud.mdb.mysql.v1.HostSpec"; /** * ID of the availability zone where the host resides. - * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + * + * To get a list of available zones, make the [yandex.cloud.compute.v1.ZoneService.List] request. */ zoneId: string; /** - * ID of the subnet that the host should belong to. This subnet should be a part - * of the network that the cluster belongs to. - * The ID of the network is set in the field [Cluster.network_id]. + * ID of the subnet to assign to the host. + * + * This subnet should be a part of the cluster network (the network ID is specified in the [ClusterService.CreateClusterRequest.network_id]). */ subnetId: string; /** - * Whether the host should get a public IP address on creation. + * Option that enables public IP address for the host so that the host can be accessed from the internet. * - * After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign - * a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + * After a host has been created, this setting cannot be changed. + * To remove an assigned public IP address, or to assign a public IP address to a host without one, recreate the host with the appropriate [assign_public_ip] value set. * * Possible values: - * * false - don't assign a public IP to the host. - * * true - the host should have a public IP address. + * * `false` - don't assign a public IP address to the host. + * * `true` - assign a public IP address to the host. */ assignPublicIp: boolean; /** [Host.name] of the host to be used as the replication source (for cascading replication). */ replicationSource: string; /** Host backup priority */ backupPriority: number; + /** Host master promotion priority */ + priority: number; } export interface ConfigSpec { $type: "yandex.cloud.mdb.mysql.v1.ConfigSpec"; /** * Version of MySQL used in the cluster. - * Possible values: - * * 5.7 - * * 8.0 + * + * Possible values: `5.7`, `8.0`. */ version: string; /** Configuration for a MySQL 5.7 cluster. */ mysqlConfig57?: Mysqlconfig57 | undefined; /** Configuration for a MySQL 8.0 cluster. */ mysqlConfig80?: Mysqlconfig80 | undefined; - /** Resources allocated to MySQL hosts. */ + /** Resource preset for the cluster hosts. 
*/ resources?: Resources; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; - /** Access policy to DB */ + /** + * Access policy for external services. + * + * If the specific services need to access the cluster, then set the necessary values in this policy. + */ access?: Access; + /** Configuration of the performance diagnostics service. */ + performanceDiagnostics?: PerformanceDiagnostics; } const baseGetClusterRequest: object = { @@ -5118,6 +5208,7 @@ const baseUpdateHostSpec: object = { replicationSource: "", backupPriority: 0, assignPublicIp: false, + priority: 0, }; export const UpdateHostSpec = { @@ -5142,6 +5233,9 @@ export const UpdateHostSpec = { if (message.assignPublicIp === true) { writer.uint32(40).bool(message.assignPublicIp); } + if (message.priority !== 0) { + writer.uint32(48).int64(message.priority); + } return writer; }, @@ -5167,6 +5261,9 @@ export const UpdateHostSpec = { case 5: message.assignPublicIp = reader.bool(); break; + case 6: + message.priority = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -5198,6 +5295,10 @@ export const UpdateHostSpec = { object.assignPublicIp !== undefined && object.assignPublicIp !== null ? Boolean(object.assignPublicIp) : false; + message.priority = + object.priority !== undefined && object.priority !== null + ? Number(object.priority) + : 0; return message; }, @@ -5214,6 +5315,8 @@ export const UpdateHostSpec = { (obj.backupPriority = Math.round(message.backupPriority)); message.assignPublicIp !== undefined && (obj.assignPublicIp = message.assignPublicIp); + message.priority !== undefined && + (obj.priority = Math.round(message.priority)); return obj; }, @@ -5229,6 +5332,7 @@ export const UpdateHostSpec = { : undefined; message.backupPriority = object.backupPriority ?? 0; message.assignPublicIp = object.assignPublicIp ?? false; + message.priority = object.priority ?? 0; return message; }, }; @@ -5242,6 +5346,7 @@ const baseHostSpec: object = { assignPublicIp: false, replicationSource: "", backupPriority: 0, + priority: 0, }; export const HostSpec = { @@ -5266,6 +5371,9 @@ export const HostSpec = { if (message.backupPriority !== 0) { writer.uint32(40).int64(message.backupPriority); } + if (message.priority !== 0) { + writer.uint32(48).int64(message.priority); + } return writer; }, @@ -5291,6 +5399,9 @@ export const HostSpec = { case 5: message.backupPriority = longToNumber(reader.int64() as Long); break; + case 6: + message.priority = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -5322,6 +5433,10 @@ export const HostSpec = { object.backupPriority !== undefined && object.backupPriority !== null ? Number(object.backupPriority) : 0; + message.priority = + object.priority !== undefined && object.priority !== null + ? Number(object.priority) + : 0; return message; }, @@ -5335,6 +5450,8 @@ export const HostSpec = { (obj.replicationSource = message.replicationSource); message.backupPriority !== undefined && (obj.backupPriority = Math.round(message.backupPriority)); + message.priority !== undefined && + (obj.priority = Math.round(message.priority)); return obj; }, @@ -5345,6 +5462,7 @@ export const HostSpec = { message.assignPublicIp = object.assignPublicIp ?? false; message.replicationSource = object.replicationSource ?? ""; message.backupPriority = object.backupPriority ?? 0; + message.priority = object.priority ?? 
0; return message; }, }; @@ -5390,6 +5508,12 @@ export const ConfigSpec = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(42).fork()).ldelim(); } + if (message.performanceDiagnostics !== undefined) { + PerformanceDiagnostics.encode( + message.performanceDiagnostics, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -5418,6 +5542,12 @@ export const ConfigSpec = { case 5: message.access = Access.decode(reader, reader.uint32()); break; + case 7: + message.performanceDiagnostics = PerformanceDiagnostics.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -5453,6 +5583,11 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) + : undefined; return message; }, @@ -5477,6 +5612,10 @@ export const ConfigSpec = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.performanceDiagnostics !== undefined && + (obj.performanceDiagnostics = message.performanceDiagnostics + ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) + : undefined); return obj; }, @@ -5506,6 +5645,11 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) + : undefined; return message; }, }; @@ -5514,11 +5658,7 @@ messageTypeRegistry.set(ConfigSpec.$type, ConfigSpec); /** A set of methods for managing MySQL clusters. */ export const ClusterServiceService = { - /** - * Returns the specified MySQL cluster. - * - * To get the list of available MySQL clusters, make a [List] request. - */ + /** Retrieves information about a cluster. */ get: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Get", requestStream: false, @@ -5530,7 +5670,7 @@ export const ClusterServiceService = { Buffer.from(Cluster.encode(value).finish()), responseDeserialize: (value: Buffer) => Cluster.decode(value), }, - /** Retrieves the list of MySQL clusters that belong to the specified folder. */ + /** Retrieves the list of clusters in a folder. */ list: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/List", requestStream: false, @@ -5542,7 +5682,7 @@ export const ClusterServiceService = { Buffer.from(ListClustersResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), }, - /** Creates a MySQL cluster in the specified folder. */ + /** Creates a cluster in a folder. */ create: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Create", requestStream: false, @@ -5554,7 +5694,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Modifies the specified MySQL cluster. */ + /** Updates a cluster. 
*/ update: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Update", requestStream: false, @@ -5566,7 +5706,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified MySQL cluster. */ + /** Deletes a cluster. */ delete: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Delete", requestStream: false, @@ -5578,7 +5718,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Starts the specified MySQL cluster. */ + /** Starts a cluster. */ start: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Start", requestStream: false, @@ -5590,7 +5730,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Stops the specified MySQL cluster. */ + /** Stops a cluster. */ stop: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Stop", requestStream: false, @@ -5602,7 +5742,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Moves the specified MySQL cluster to the specified folder. */ + /** Moves a cluster to a folder. */ move: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Move", requestStream: false, @@ -5614,7 +5754,11 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Creates a backup for the specified MySQL cluster. */ + /** + * Creates a backup for a cluster. + * + * To get information about a backup, make a [BackupService.Get] request. + */ backup: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Backup", requestStream: false, @@ -5626,7 +5770,11 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Creates a new MySQL cluster using the specified backup. */ + /** + * Restores a backup to a new cluster. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. + */ restore: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Restore", requestStream: false, @@ -5651,7 +5799,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Start a manual failover on the specified MySQL cluster. */ + /** Starts a manual failover for a cluster. */ startFailover: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/StartFailover", requestStream: false, @@ -5664,7 +5812,11 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Retrieves logs for the specified MySQL cluster. */ + /** + * Retrieves logs for a cluster. + * + * Alternatively, logs can be streamed using [StreamLogs]. + */ listLogs: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListLogs", requestStream: false, @@ -5677,7 +5829,11 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterLogsResponse.decode(value), }, - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** + * Retrieves a log stream for a cluster. 
+ * + * This method is similar to [ListLogs], but uses server-side streaming, which allows for the `tail -f` command semantics. + */ streamLogs: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/StreamLogs", requestStream: false, @@ -5690,7 +5846,7 @@ export const ClusterServiceService = { Buffer.from(StreamLogRecord.encode(value).finish()), responseDeserialize: (value: Buffer) => StreamLogRecord.decode(value), }, - /** Retrieves the list of operations for the specified MySQL cluster. */ + /** Retrieves a list of operations for a cluster. */ listOperations: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListOperations", requestStream: false, @@ -5704,7 +5860,11 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterOperationsResponse.decode(value), }, - /** Retrieves the list of available backups for the specified MySQL cluster. */ + /** + * Retrieves a list of backups for a cluster. + * + * To list all backups in a folder, make a [BackupService.List] request. + */ listBackups: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListBackups", requestStream: false, @@ -5718,7 +5878,7 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterBackupsResponse.decode(value), }, - /** Retrieves a list of hosts for the specified MySQL cluster. */ + /** Retrieves a list of hosts for a cluster. */ listHosts: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListHosts", requestStream: false, @@ -5732,7 +5892,7 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterHostsResponse.decode(value), }, - /** Creates new hosts for a cluster. */ + /** Adds new hosts in a cluster. */ addHosts: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/AddHosts", requestStream: false, @@ -5773,57 +5933,73 @@ export const ClusterServiceService = { } as const; export interface ClusterServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified MySQL cluster. - * - * To get the list of available MySQL clusters, make a [List] request. - */ + /** Retrieves information about a cluster. */ get: handleUnaryCall; - /** Retrieves the list of MySQL clusters that belong to the specified folder. */ + /** Retrieves the list of clusters in a folder. */ list: handleUnaryCall; - /** Creates a MySQL cluster in the specified folder. */ + /** Creates a cluster in a folder. */ create: handleUnaryCall; - /** Modifies the specified MySQL cluster. */ + /** Updates a cluster. */ update: handleUnaryCall; - /** Deletes the specified MySQL cluster. */ + /** Deletes a cluster. */ delete: handleUnaryCall; - /** Starts the specified MySQL cluster. */ + /** Starts a cluster. */ start: handleUnaryCall; - /** Stops the specified MySQL cluster. */ + /** Stops a cluster. */ stop: handleUnaryCall; - /** Moves the specified MySQL cluster to the specified folder. */ + /** Moves a cluster to a folder. */ move: handleUnaryCall; - /** Creates a backup for the specified MySQL cluster. */ + /** + * Creates a backup for a cluster. + * + * To get information about a backup, make a [BackupService.Get] request. + */ backup: handleUnaryCall; - /** Creates a new MySQL cluster using the specified backup. */ + /** + * Restores a backup to a new cluster. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. + */ restore: handleUnaryCall; /** Reschedules planned maintenance operation. 
*/ rescheduleMaintenance: handleUnaryCall< RescheduleMaintenanceRequest, Operation >; - /** Start a manual failover on the specified MySQL cluster. */ + /** Starts a manual failover for a cluster. */ startFailover: handleUnaryCall; - /** Retrieves logs for the specified MySQL cluster. */ + /** + * Retrieves logs for a cluster. + * + * Alternatively, logs can be streamed using [StreamLogs]. + */ listLogs: handleUnaryCall; - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** + * Retrieves a log stream for a cluster. + * + * This method is similar to [ListLogs], but uses server-side streaming, which allows for the `tail -f` command semantics. + */ streamLogs: handleServerStreamingCall< StreamClusterLogsRequest, StreamLogRecord >; - /** Retrieves the list of operations for the specified MySQL cluster. */ + /** Retrieves a list of operations for a cluster. */ listOperations: handleUnaryCall< ListClusterOperationsRequest, ListClusterOperationsResponse >; - /** Retrieves the list of available backups for the specified MySQL cluster. */ + /** + * Retrieves a list of backups for a cluster. + * + * To list all backups in a folder, make a [BackupService.List] request. + */ listBackups: handleUnaryCall< ListClusterBackupsRequest, ListClusterBackupsResponse >; - /** Retrieves a list of hosts for the specified MySQL cluster. */ + /** Retrieves a list of hosts for a cluster. */ listHosts: handleUnaryCall; - /** Creates new hosts for a cluster. */ + /** Adds new hosts in a cluster. */ addHosts: handleUnaryCall; /** Updates the specified hosts. */ updateHosts: handleUnaryCall; @@ -5832,11 +6008,7 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { } export interface ClusterServiceClient extends Client { - /** - * Returns the specified MySQL cluster. - * - * To get the list of available MySQL clusters, make a [List] request. - */ + /** Retrieves information about a cluster. */ get( request: GetClusterRequest, callback: (error: ServiceError | null, response: Cluster) => void @@ -5852,7 +6024,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Cluster) => void ): ClientUnaryCall; - /** Retrieves the list of MySQL clusters that belong to the specified folder. */ + /** Retrieves the list of clusters in a folder. */ list( request: ListClustersRequest, callback: ( @@ -5877,7 +6049,7 @@ export interface ClusterServiceClient extends Client { response: ListClustersResponse ) => void ): ClientUnaryCall; - /** Creates a MySQL cluster in the specified folder. */ + /** Creates a cluster in a folder. */ create( request: CreateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5893,7 +6065,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Modifies the specified MySQL cluster. */ + /** Updates a cluster. */ update( request: UpdateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5909,7 +6081,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified MySQL cluster. */ + /** Deletes a cluster. 
*/ delete( request: DeleteClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5925,7 +6097,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Starts the specified MySQL cluster. */ + /** Starts a cluster. */ start( request: StartClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5941,7 +6113,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Stops the specified MySQL cluster. */ + /** Stops a cluster. */ stop( request: StopClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5957,7 +6129,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Moves the specified MySQL cluster to the specified folder. */ + /** Moves a cluster to a folder. */ move( request: MoveClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5973,7 +6145,11 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Creates a backup for the specified MySQL cluster. */ + /** + * Creates a backup for a cluster. + * + * To get information about a backup, make a [BackupService.Get] request. + */ backup( request: BackupClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5989,7 +6165,11 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Creates a new MySQL cluster using the specified backup. */ + /** + * Restores a backup to a new cluster. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. + */ restore( request: RestoreClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -6021,7 +6201,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Start a manual failover on the specified MySQL cluster. */ + /** Starts a manual failover for a cluster. */ startFailover( request: StartClusterFailoverRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -6037,7 +6217,11 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Retrieves logs for the specified MySQL cluster. */ + /** + * Retrieves logs for a cluster. + * + * Alternatively, logs can be streamed using [StreamLogs]. + */ listLogs( request: ListClusterLogsRequest, callback: ( @@ -6062,7 +6246,11 @@ export interface ClusterServiceClient extends Client { response: ListClusterLogsResponse ) => void ): ClientUnaryCall; - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** + * Retrieves a log stream for a cluster. + * + * This method is similar to [ListLogs], but uses server-side streaming, which allows for the `tail -f` command semantics. 
+ */ streamLogs( request: StreamClusterLogsRequest, options?: Partial @@ -6072,7 +6260,7 @@ export interface ClusterServiceClient extends Client { metadata?: Metadata, options?: Partial ): ClientReadableStream; - /** Retrieves the list of operations for the specified MySQL cluster. */ + /** Retrieves a list of operations for a cluster. */ listOperations( request: ListClusterOperationsRequest, callback: ( @@ -6097,7 +6285,11 @@ export interface ClusterServiceClient extends Client { response: ListClusterOperationsResponse ) => void ): ClientUnaryCall; - /** Retrieves the list of available backups for the specified MySQL cluster. */ + /** + * Retrieves a list of backups for a cluster. + * + * To list all backups in a folder, make a [BackupService.List] request. + */ listBackups( request: ListClusterBackupsRequest, callback: ( @@ -6122,7 +6314,7 @@ export interface ClusterServiceClient extends Client { response: ListClusterBackupsResponse ) => void ): ClientUnaryCall; - /** Retrieves a list of hosts for the specified MySQL cluster. */ + /** Retrieves a list of hosts for a cluster. */ listHosts( request: ListClusterHostsRequest, callback: ( @@ -6147,7 +6339,7 @@ export interface ClusterServiceClient extends Client { response: ListClusterHostsResponse ) => void ): ClientUnaryCall; - /** Creates new hosts for a cluster. */ + /** Adds new hosts in a cluster. */ addHosts( request: AddClusterHostsRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts index 9b008799..d4f88278 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts @@ -10,301 +10,301 @@ import { export const protobufPackage = "yandex.cloud.mdb.mysql.v1.config"; -/** Options and structure of `MysqlConfig5_7` reflects MySQL 5.7 configuration file */ +/** Options and structure of `MysqlConfig5_7` reflects MySQL 5.7 configuration file. */ export interface Mysqlconfig57 { $type: "yandex.cloud.mdb.mysql.v1.config.MysqlConfig5_7"; /** * Size of the InnoDB buffer pool used for caching table and index data. * - * For details, see [MySQL documentation for the parameter](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size) for details. */ innodbBufferPoolSize?: number; /** * The maximum permitted number of simultaneous client connections. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections) for details. */ maxConnections?: number; /** * Time that it takes to process a query before it is considered slow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time) for details. */ longQueryTime?: number; /** * Enable writing of general query log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_general_log). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_general_log) for details. */ generalLog?: boolean; /** * Enable writing of audit log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/mysql-security-excerpt/5.6/en/audit-log-options-variables.html#option_mysqld_audit-log). + * See [MySQL documentation](https://dev.mysql.com/doc/mysql-security-excerpt/5.6/en/audit-log-options-variables.html#option_mysqld_audit-log) for details. */ auditLog?: boolean; /** * Server SQL mode of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-setting). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-setting) for details. */ sqlMode: Mysqlconfig57_SQLMode[]; /** * The maximum size in bytes of one packet. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_allowed_packet). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_allowed_packet) for details. */ maxAllowedPacket?: number; /** * Authentication plugin used in the managed MySQL cluster. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_default_authentication_plugin) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_default_authentication_plugin for details. */ defaultAuthenticationPlugin: Mysqlconfig57_AuthPlugin; /** * Transaction log flush behaviour. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit for details. */ innodbFlushLogAtTrxCommit?: number; /** - * Max time in seconds for a transaction to wait for a row lock + * Max time in seconds for a transaction to wait for a row lock. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout for details. */ innodbLockWaitTimeout?: number; /** * Default transaction isolation level. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation for details. */ transactionIsolation: Mysqlconfig57_TransactionIsolation; /** - * Print information about deadlocks in error log + * Print information about deadlocks in error log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks for details. */ innodbPrintAllDeadlocks?: boolean; /** * The number of seconds to wait for more data from a connection before aborting the read. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_read_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_read_timeout for details. */ netReadTimeout?: number; /** * The number of seconds to wait for a block to be written to a connection before aborting the write. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_write_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_write_timeout for details. */ netWriteTimeout?: number; /** * The maximum permitted result length in bytes for the GROUP_CONCAT() function. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_group_concat_max_len) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_group_concat_max_len for details. */ groupConcatMaxLen?: number; /** * The maximum size of internal in-memory temporary tables. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size for details. */ tmpTableSize?: number; /** * This variable sets the maximum size to which user-created MEMORY tables are permitted to grow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size for details. */ maxHeapTableSize?: number; /** * The servers default time zone. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_default-time-zone) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_default-time-zone for details. */ defaultTimeZone: string; /** * The servers default character set. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_server for details. */ characterSetServer: string; /** * The server default collation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_collation_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_collation_server for details. */ collationServer: string; /** - * Enables Innodb adaptive hash index + * Enables InnoDB adaptive hash index. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index for details. 
*/ innodbAdaptiveHashIndex?: boolean; /** * Enables the NUMA interleave memory policy for allocation of the InnoDB buffer pool. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_numa_interleave) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_numa_interleave for details. */ innodbNumaInterleave?: boolean; /** * The size in bytes of the buffer that InnoDB uses to write to the log files on disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size for details. */ innodbLogBufferSize?: number; /** - * The size in bytes of the single Innodb Redo log file. + * The size in bytes of the single InnoDB Redo log file. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_file_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_file_size for details. */ innodbLogFileSize?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity for details. */ innodbIoCapacity?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity_max for details. */ innodbIoCapacityMax?: number; /** * The number of I/O threads for read operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_read_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_read_io_threads for details. */ innodbReadIoThreads?: number; /** * The number of I/O threads for write operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_write_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_write_io_threads for details. */ innodbWriteIoThreads?: number; /** * The number of background threads devoted to the InnoDB purge operation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_purge_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_purge_threads for details. */ innodbPurgeThreads?: number; /** * Defines the maximum number of threads permitted inside of InnoDB. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_thread_concurrency for details. */ innodbThreadConcurrency?: number; /** - * Limits the max size of InnoDB temp tablespace + * Limits the max size of InnoDB temp tablespace. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path for details. */ innodbTempDataFileMaxSize?: number; /** - * How many threads the server should cache for reuse. + * A number of threads the server should cache for reuse. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_cache_size) for details. */ threadCacheSize?: number; /** * The stack size for each thread. The default is large enough for normal operation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_stack). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_stack) for details. */ threadStack?: number; /** * The minimum size of the buffer that is used for plain index scans, range index scans, and joins that do not use indexes and thus perform full table scans. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_join_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_join_buffer_size) for details. */ joinBufferSize?: number; /** * Each session that must perform a sort allocates a buffer of this size. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sort_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sort_buffer_size) for details. */ sortBufferSize?: number; /** * The number of table definitions that can be stored in the definition cache. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_definition_cache). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_definition_cache) for details. */ tableDefinitionCache?: number; /** * The number of open tables for all threads. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache) for details. */ tableOpenCache?: number; /** * The number of open tables cache instances. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache_instances). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache_instances) for details. */ tableOpenCacheInstances?: number; /** - * This system variable determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. + * Determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp) for details. */ explicitDefaultsForTimestamp?: boolean; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_increment). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_increment) for details. */ autoIncrementIncrement?: number; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_offset). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_offset) for details. */ autoIncrementOffset?: number; /** * Controls how often the MySQL server synchronizes the binary log to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_sync_binlog). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_sync_binlog) for details. */ syncBinlog?: number; /** * The size of the cache to hold changes to the binary log during a transaction. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_cache_size) for details. */ binlogCacheSize?: number; /** * Controls how many microseconds the binary log commit waits before synchronizing the binary log file to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay). + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay) for details. */ binlogGroupCommitSyncDelay?: number; /** * For MySQL row-based replication, this variable determines how row images are written to the binary log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_row_image). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_row_image) for details. 
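The `auto_increment_increment` / `auto_increment_offset` pair above is easiest to grasp with concrete numbers: roughly speaking, the server hands out AUTO_INCREMENT values from the arithmetic progression `offset + k * increment`. A tiny illustration (plain arithmetic, not SDK code; the exact value-selection rules have more corner cases than shown here):

```ts
// Roughly: MySQL picks the next AUTO_INCREMENT value from offset + k * increment
// (k = 0, 1, 2, ...) that is greater than the column's current maximum.
const nextValues = (offset: number, increment: number, count: number): number[] =>
  Array.from({ length: count }, (_, k) => offset + k * increment);

console.log(nextValues(1, 2, 5)); // [1, 3, 5, 7, 9]  -> one writer uses odd ids
console.log(nextValues(2, 2, 5)); // [2, 4, 6, 8, 10] -> another writer uses even ids
```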
*/ binlogRowImage: Mysqlconfig57_BinlogRowImage; /** * When enabled, it causes the server to write informational log events such as row query log events into its binary log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events) for details. */ binlogRowsQueryLogEvents?: boolean; /** * The number of replica acknowledgments the source must receive per transaction before proceeding. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. */ rplSemiSyncMasterWaitForSlaveCount?: number; /** - * When using a multithreaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. + * When using a multi-threaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_type). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_type) for details. */ slaveParallelType: Mysqlconfig57_SlaveParallelType; /** * Sets the number of applier threads for executing replication transactions in parallel. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_workers). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_workers) for details. */ slaveParallelWorkers?: number; /** The size of the binary log to hold. */ @@ -312,64 +312,105 @@ export interface Mysqlconfig57 { /** * The number of seconds the server waits for activity on an interactive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_interactive_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_interactive_timeout) for details. */ interactiveTimeout?: number; /** * The number of seconds the server waits for activity on a noninteractive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_wait_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_wait_timeout) for details. */ waitTimeout?: number; /** Replication lag threshold (seconds) which will switch MySQL to 'offline_mode = ON' to prevent users from reading stale data. */ mdbOfflineModeEnableLag?: number; /** * Replication lag threshold (seconds) which will switch MySQL to 'offline_mode = OFF'. - * Should be less than mdb_offline_mode_enable_lag. + * Should be less than mdb_offline_mode_enable_lag value. 
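Taken together, the offline-mode thresholds above and the `mdb_priority_choice_max_lag` field added further below imply an ordering: the priority-choice lag should stay below the disable-lag threshold, which in turn should stay below the enable-lag threshold. A hedged sketch of picking consistent values and asserting that ordering client-side (the field names come from the generated interface; the numbers and the check itself are illustrative, not something the SDK enforces):

```ts
// Illustrative values only; the ordering comes from the doc comments in this message.
const lagSettings = {
  mdbOfflineModeEnableLag: 600, // switch to offline_mode = ON after 10 min of lag
  mdbOfflineModeDisableLag: 300, // switch back to offline_mode = OFF below 5 min
  mdbPriorityChoiceMaxLag: 60, // replica may be promoted while lag is under 1 min
};

const { mdbPriorityChoiceMaxLag, mdbOfflineModeDisableLag, mdbOfflineModeEnableLag } = lagSettings;
if (!(mdbPriorityChoiceMaxLag < mdbOfflineModeDisableLag && mdbOfflineModeDisableLag < mdbOfflineModeEnableLag)) {
  throw new Error("expected priority-choice lag < disable lag < enable lag");
}
```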
*/ mdbOfflineModeDisableLag?: number; /** * The limit on memory consumption for the range optimizer. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size) for details. */ rangeOptimizerMaxMemSize?: number; /** - * Manages slow query log + * Manages slow query log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log) for details. */ slowQueryLog?: boolean; /** - * Query execution time, after which query to be logged unconditionally, that is, log_slow_rate_limit will not apply to it + * Query execution time, after which query to be logged unconditionally, that is, `log_slow_rate_limit`` will not apply to it. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time) for details. */ slowQueryLogAlwaysWriteTime?: number; /** - * Specifies slow log granularity for log_slow_rate_limit: QUERY or SESSION + * Specifies slow log granularity for `log_slow_rate_limit` values QUERY or SESSION. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type) for details. */ logSlowRateType: Mysqlconfig57_LogSlowRateType; /** * Specifies what fraction of session/query should be logged. Logging is enabled for every nth session/query. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit) for details. */ logSlowRateLimit?: number; /** - * When TRUE, statements executed by stored procedures are logged to the slow log + * When TRUE, statements executed by stored procedures are logged to the slow log. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements) for details. */ logSlowSpStatements?: boolean; /** - * Filters the slow log by the query's execution plan + * Filters the slow log by the query's execution plan. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter) for details. */ logSlowFilter: Mysqlconfig57_LogSlowFilterType[]; + /** + * Replication lag threshold (seconds) which allows replica to be promoted to master while executing "switchover from". 
+ * Should be less than mdb_offline_mode_disable_lag. + */ + mdbPriorityChoiceMaxLag?: number; + /** + * Specifies the page size for InnoDB tablespaces. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_page_size). + */ + innodbPageSize?: number; + /** + * The limit in bytes on the size of the temporary log files used during online DDL operations + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_online_alter_log_max_size). + */ + innodbOnlineAlterLogMaxSize?: number; + /** + * Minimum length of words that are stored in an InnoDB FULLTEXT index + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_min_token_size). + */ + innodbFtMinTokenSize?: number; + /** + * Maximum length of words that are stored in an InnoDB FULLTEXT index + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_max_token_size). + */ + innodbFtMaxTokenSize?: number; + /** + * Table names storage and comparison strategy + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_lower_case_table_names). + */ + lowerCaseTableNames?: number; + /** + * Manages MySQL 5.6 compatibility + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_show_compatibility_56). + */ + showCompatibility56?: boolean; } export enum Mysqlconfig57_SQLMode { @@ -1373,6 +1414,66 @@ export const Mysqlconfig57 = { writer.int32(v); } writer.ldelim(); + if (message.mdbPriorityChoiceMaxLag !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mdbPriorityChoiceMaxLag!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.innodbPageSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.innodbPageSize! 
}, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.innodbOnlineAlterLogMaxSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbOnlineAlterLogMaxSize!, + }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.innodbFtMinTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMinTokenSize!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.innodbFtMaxTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMaxTokenSize!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.lowerCaseTableNames !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.lowerCaseTableNames!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.showCompatibility56 !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.showCompatibility56!, + }, + writer.uint32(546).fork() + ).ldelim(); + } return writer; }, @@ -1726,6 +1827,48 @@ export const Mysqlconfig57 = { message.logSlowFilter.push(reader.int32() as any); } break; + case 62: + message.mdbPriorityChoiceMaxLag = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.innodbPageSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.innodbOnlineAlterLogMaxSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.innodbFtMinTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.innodbFtMaxTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.lowerCaseTableNames = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.showCompatibility56 = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2015,6 +2158,40 @@ export const Mysqlconfig57 = { message.logSlowFilter = (object.logSlowFilter ?? []).map((e: any) => mysqlconfig57_LogSlowFilterTypeFromJSON(e) ); + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag !== undefined && + object.mdbPriorityChoiceMaxLag !== null + ? Number(object.mdbPriorityChoiceMaxLag) + : undefined; + message.innodbPageSize = + object.innodbPageSize !== undefined && object.innodbPageSize !== null + ? Number(object.innodbPageSize) + : undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize !== undefined && + object.innodbOnlineAlterLogMaxSize !== null + ? Number(object.innodbOnlineAlterLogMaxSize) + : undefined; + message.innodbFtMinTokenSize = + object.innodbFtMinTokenSize !== undefined && + object.innodbFtMinTokenSize !== null + ? Number(object.innodbFtMinTokenSize) + : undefined; + message.innodbFtMaxTokenSize = + object.innodbFtMaxTokenSize !== undefined && + object.innodbFtMaxTokenSize !== null + ? Number(object.innodbFtMaxTokenSize) + : undefined; + message.lowerCaseTableNames = + object.lowerCaseTableNames !== undefined && + object.lowerCaseTableNames !== null + ? Number(object.lowerCaseTableNames) + : undefined; + message.showCompatibility56 = + object.showCompatibility_56 !== undefined && + object.showCompatibility_56 !== null + ? 
Boolean(object.showCompatibility_56) + : undefined; return message; }, @@ -2158,6 +2335,20 @@ export const Mysqlconfig57 = { } else { obj.logSlowFilter = []; } + message.mdbPriorityChoiceMaxLag !== undefined && + (obj.mdbPriorityChoiceMaxLag = message.mdbPriorityChoiceMaxLag); + message.innodbPageSize !== undefined && + (obj.innodbPageSize = message.innodbPageSize); + message.innodbOnlineAlterLogMaxSize !== undefined && + (obj.innodbOnlineAlterLogMaxSize = message.innodbOnlineAlterLogMaxSize); + message.innodbFtMinTokenSize !== undefined && + (obj.innodbFtMinTokenSize = message.innodbFtMinTokenSize); + message.innodbFtMaxTokenSize !== undefined && + (obj.innodbFtMaxTokenSize = message.innodbFtMaxTokenSize); + message.lowerCaseTableNames !== undefined && + (obj.lowerCaseTableNames = message.lowerCaseTableNames); + message.showCompatibility56 !== undefined && + (obj.showCompatibility_56 = message.showCompatibility56); return obj; }, @@ -2241,6 +2432,15 @@ export const Mysqlconfig57 = { message.logSlowRateLimit = object.logSlowRateLimit ?? undefined; message.logSlowSpStatements = object.logSlowSpStatements ?? undefined; message.logSlowFilter = object.logSlowFilter?.map((e) => e) || []; + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag ?? undefined; + message.innodbPageSize = object.innodbPageSize ?? undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize ?? undefined; + message.innodbFtMinTokenSize = object.innodbFtMinTokenSize ?? undefined; + message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; + message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; + message.showCompatibility56 = object.showCompatibility56 ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts index f79c9405..20d8be1c 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts @@ -10,307 +10,307 @@ import { export const protobufPackage = "yandex.cloud.mdb.mysql.v1.config"; -/** Options and structure of `MysqlConfig8_0` reflects MySQL 8.0 configuration file */ +/** Options and structure of `MysqlConfig8_0` reflects MySQL 8.0 configuration file. */ export interface Mysqlconfig80 { $type: "yandex.cloud.mdb.mysql.v1.config.MysqlConfig8_0"; /** * Size of the InnoDB buffer pool used for caching table and index data. * - * For details, see [MySQL documentation for the parameter](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size) for details. */ innodbBufferPoolSize?: number; /** * The maximum permitted number of simultaneous client connections. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_connections). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_connections) for details. */ maxConnections?: number; /** * Time that it takes to process a query before it is considered slow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_long_query_time). 
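The encode/decode additions above for `Mysqlconfig57` (and the matching ones for `Mysqlconfig80` further below) follow the usual ts-proto pattern for optional scalars: each new setting is emitted as a length-delimited `google.protobuf.Int64Value` / `BoolValue` submessage only when it is defined, and the literal passed to `writer.uint32(...)` is the wire tag `(fieldNumber << 3) | wireType`. A quick check of that arithmetic against the new field numbers, plus a note on the JSON key used for the 5.7-only compatibility flag:

```ts
// Wire tag = (field number << 3) | wire type; wire type 2 is "length-delimited",
// which is what the wrapped Int64Value/BoolValue submessages use.
const tag = (fieldNumber: number, wireType: number): number => (fieldNumber << 3) | wireType;

console.log(tag(62, 2)); // 498 -> mdbPriorityChoiceMaxLag in Mysqlconfig57 (decode case 62)
console.log(tag(68, 2)); // 546 -> showCompatibility56 in Mysqlconfig57 (decode case 68)
console.log(tag(63, 2)); // 506 -> mdbPriorityChoiceMaxLag in Mysqlconfig80 (decode case 63)

// JSON naming: the TypeScript property is `showCompatibility56`, but fromJSON reads
// and toJSON writes the key `showCompatibility_56`, keeping the underscore from the
// original proto field name.
```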
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_long_query_time) for details. */ longQueryTime?: number; /** * Enable writing of general query log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_general_log). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_general_log) for details. */ generalLog?: boolean; /** * Enable writing of audit log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/audit-log-reference.html#audit-log-options-variables). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/audit-log-reference.html#audit-log-options-variables) for details. */ auditLog?: boolean; /** * Server SQL mode of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sql-mode-setting). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sql-mode-setting) for details. */ sqlMode: Mysqlconfig80_SQLMode[]; /** * The maximum size in bytes of one packet. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_allowed_packet). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_allowed_packet) for details. */ maxAllowedPacket?: number; /** * Authentication plugin used in the managed MySQL cluster. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_default_authentication_plugin) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_default_authentication_plugin for details. */ defaultAuthenticationPlugin: Mysqlconfig80_AuthPlugin; /** * Transaction log flush behaviour. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit for details. */ innodbFlushLogAtTrxCommit?: number; /** - * Max time in seconds for a transaction to wait for a row lock + * Max time in seconds for a transaction to wait for a row lock. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout for details. */ innodbLockWaitTimeout?: number; /** * Default transaction isolation level. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_transaction_isolation) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_transaction_isolation for details. */ transactionIsolation: Mysqlconfig80_TransactionIsolation; /** - * Print information about deadlocks in error log + * Print information about deadlocks in error log. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks for details. */ innodbPrintAllDeadlocks?: boolean; /** * The number of seconds to wait for more data from a connection before aborting the read. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_read_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_read_timeout for details. */ netReadTimeout?: number; /** * The number of seconds to wait for a block to be written to a connection before aborting the write. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_write_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_write_timeout for details. */ netWriteTimeout?: number; /** * The maximum permitted result length in bytes for the GROUP_CONCAT() function. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len for details. */ groupConcatMaxLen?: number; /** * The maximum size of internal in-memory temporary tables. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size for details. */ tmpTableSize?: number; /** * This variable sets the maximum size to which user-created MEMORY tables are permitted to grow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_heap_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_heap_table_size for details. */ maxHeapTableSize?: number; /** * The servers default time zone. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-options.html#option_mysqld_default-time-zone) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-options.html#option_mysqld_default-time-zone for details. */ defaultTimeZone: string; /** * The servers default character set. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_character_set_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_character_set_server for details. */ characterSetServer: string; /** * The server default collation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_collation_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_collation_server for details. 
*/ collationServer: string; /** - * Enables Innodb adaptive hash index + * Enables InnoDB adaptive hash index. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index for details. */ innodbAdaptiveHashIndex?: boolean; /** * Enables the NUMA interleave memory policy for allocation of the InnoDB buffer pool. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_numa_interleave) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_numa_interleave for details. */ innodbNumaInterleave?: boolean; /** * The size in bytes of the buffer that InnoDB uses to write to the log files on disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_buffer_size for details. */ innodbLogBufferSize?: number; /** - * The size in bytes of the single Innodb Redo log file. + * The size in bytes of the single InnoDB Redo log file. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size for details. */ innodbLogFileSize?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity for details. */ innodbIoCapacity?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity_max for details. */ innodbIoCapacityMax?: number; /** * The number of I/O threads for read operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_read_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_read_io_threads for details. */ innodbReadIoThreads?: number; /** * The number of I/O threads for write operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_write_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_write_io_threads for details. */ innodbWriteIoThreads?: number; /** * The number of background threads devoted to the InnoDB purge operation. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_purge_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_purge_threads for details. */ innodbPurgeThreads?: number; /** * Defines the maximum number of threads permitted inside of InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_thread_concurrency for details. */ innodbThreadConcurrency?: number; /** - * Limits the max size of InnoDB temp tablespace + * Limits the max size of InnoDB temp tablespace. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path for details. */ innodbTempDataFileMaxSize?: number; /** * How many threads the server should cache for reuse. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_cache_size) for details. */ threadCacheSize?: number; /** * The stack size for each thread. The default is large enough for normal operation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_stack). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_stack) for details. */ threadStack?: number; /** * The minimum size of the buffer that is used for plain index scans, range index scans, and joins that do not use indexes and thus perform full table scans. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_join_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_join_buffer_size) for details. */ joinBufferSize?: number; /** * Each session that must perform a sort allocates a buffer of this size. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_sort_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_sort_buffer_size) for details. */ sortBufferSize?: number; /** * The number of table definitions that can be stored in the definition cache. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_definition_cache). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_definition_cache) for details. */ tableDefinitionCache?: number; /** * The number of open tables for all threads. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache) for details. */ tableOpenCache?: number; /** * The number of open tables cache instances. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache_instances). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache_instances) for details. */ tableOpenCacheInstances?: number; /** - * This system variable determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. + * Determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp) for details. */ explicitDefaultsForTimestamp?: boolean; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment) for details. */ autoIncrementIncrement?: number; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_offset). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_offset) for details. */ autoIncrementOffset?: number; /** * Controls how often the MySQL server synchronizes the binary log to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_sync_binlog). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_sync_binlog) for details. */ syncBinlog?: number; /** * The size of the cache to hold changes to the binary log during a transaction. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_cache_size) for details. */ binlogCacheSize?: number; /** * Controls how many microseconds the binary log commit waits before synchronizing the binary log file to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay) for details. */ binlogGroupCommitSyncDelay?: number; /** * For MySQL row-based replication, this variable determines how row images are written to the binary log. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_row_image). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_row_image) for details. */ binlogRowImage: Mysqlconfig80_BinlogRowImage; /** * When enabled, it causes the server to write informational log events such as row query log events into its binary log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events) for details. */ binlogRowsQueryLogEvents?: boolean; /** * The number of replica acknowledgments the source must receive per transaction before proceeding. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. */ rplSemiSyncMasterWaitForSlaveCount?: number; /** - * When using a multithreaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. + * When using a multi-threaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_type). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_type) for details. */ slaveParallelType: Mysqlconfig80_SlaveParallelType; /** * Sets the number of applier threads for executing replication transactions in parallel. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_workers). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_workers) for details. */ slaveParallelWorkers?: number; /** - * The time limit for regular expression matching operations performed by REGEXP_LIKE and similar functions + * The time limit for regular expression matching operations performed by REGEXP_LIKE and similar functions. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_regexp_time_limit). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_regexp_time_limit) for details. */ regexpTimeLimit?: number; /** The size of the binary log to hold. */ @@ -318,13 +318,13 @@ export interface Mysqlconfig80 { /** * The number of seconds the server waits for activity on an interactive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_interactive_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_interactive_timeout) for details. 
*/ interactiveTimeout?: number; /** * The number of seconds the server waits for activity on a noninteractive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_wait_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_wait_timeout) for details. */ waitTimeout?: number; /** Replication lag threshold (seconds) which will switch MySQL to 'offline_mode = ON' to prevent users from reading stale data. */ @@ -337,45 +337,80 @@ export interface Mysqlconfig80 { /** * The limit on memory consumption for the range optimizer. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size) for details. */ rangeOptimizerMaxMemSize?: number; /** - * Manages slow query log + * Manages slow query log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log) for details. */ slowQueryLog?: boolean; /** - * Query execution time, after which query to be logged unconditionally, that is, log_slow_rate_limit will not apply to it + * Query execution time, after which query to be logged unconditionally, that is, `log_slow_rate_limit` will not apply to it. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time) for details. */ slowQueryLogAlwaysWriteTime?: number; /** - * Specifies slow log granularity for log_slow_rate_limit: QUERY or SESSION + * Specifies slow log granularity for `log_slow_rate_limit` QUERY or SESSION value. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type) for details. */ logSlowRateType: Mysqlconfig80_LogSlowRateType; /** * Specifies what fraction of session/query should be logged. Logging is enabled for every nth session/query. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit) for details. */ logSlowRateLimit?: number; /** - * When TRUE, statements executed by stored procedures are logged to the slow log + * When TRUE, statements executed by stored procedures are logged to the slow log. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements) for details. 
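All of these settings, including the InnoDB page-size, FULLTEXT token-size and `lower_case_table_names` fields added further below, are optional on the wire: only values that are explicitly set get encoded, and anything left `undefined` keeps the server-side default (see the `!== undefined` guards in the encode additions). A hedged sketch of building such a partial `Mysqlconfig80`; the field names come from this interface, `fromPartial` follows the helper pattern visible in the generated code, and the values are examples only:

```ts
// Sketch only: illustrative values; omitted fields stay undefined, are not encoded,
// and therefore leave the corresponding server defaults untouched.
const config = Mysqlconfig80.fromPartial({
  slowQueryLog: true,
  longQueryTime: 1.5, // seconds before a query is treated as slow
  maxConnections: 256,
  innodbFtMinTokenSize: 2, // one of the newly added InnoDB FULLTEXT settings
  lowerCaseTableNames: 1, // store table names lowercase, compare case-insensitively
});
```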
*/ logSlowSpStatements?: boolean; /** - * Filters the slow log by the query's execution plan + * Filters the slow log by the query's execution plan. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter) for details. */ logSlowFilter: Mysqlconfig80_LogSlowFilterType[]; + /** + * Replication lag threshold (seconds) which allows replica to be promoted to master while executing "switchover from". + * Should be less than mdb_offline_mode_disable_lag. + */ + mdbPriorityChoiceMaxLag?: number; + /** + * Specifies the page size for InnoDB tablespaces. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_page_size). + */ + innodbPageSize?: number; + /** + * The limit in bytes on the size of the temporary log files used during online DDL operations + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_online_alter_log_max_size) for details. + */ + innodbOnlineAlterLogMaxSize?: number; + /** + * Minimum length of words that are stored in an InnoDB FULLTEXT index + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_ft_min_token_size) for details. + */ + innodbFtMinTokenSize?: number; + /** + * Maximum length of words that are stored in an InnoDB FULLTEXT index + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_ft_max_token_size) for details. + */ + innodbFtMaxTokenSize?: number; + /** + * Table names storage and comparison strategy + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_lower_case_table_names) for details. + */ + lowerCaseTableNames?: number; } export enum Mysqlconfig80_SQLMode { @@ -1328,6 +1363,57 @@ export const Mysqlconfig80 = { writer.int32(v); } writer.ldelim(); + if (message.mdbPriorityChoiceMaxLag !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mdbPriorityChoiceMaxLag!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.innodbPageSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.innodbPageSize! 
}, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.innodbOnlineAlterLogMaxSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbOnlineAlterLogMaxSize!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.innodbFtMinTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMinTokenSize!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.innodbFtMaxTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMaxTokenSize!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.lowerCaseTableNames !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.lowerCaseTableNames!, + }, + writer.uint32(546).fork() + ).ldelim(); + } return writer; }, @@ -1687,6 +1773,42 @@ export const Mysqlconfig80 = { message.logSlowFilter.push(reader.int32() as any); } break; + case 63: + message.mdbPriorityChoiceMaxLag = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.innodbPageSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.innodbOnlineAlterLogMaxSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.innodbFtMinTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.innodbFtMaxTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.lowerCaseTableNames = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -1980,6 +2102,35 @@ export const Mysqlconfig80 = { message.logSlowFilter = (object.logSlowFilter ?? []).map((e: any) => mysqlconfig80_LogSlowFilterTypeFromJSON(e) ); + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag !== undefined && + object.mdbPriorityChoiceMaxLag !== null + ? Number(object.mdbPriorityChoiceMaxLag) + : undefined; + message.innodbPageSize = + object.innodbPageSize !== undefined && object.innodbPageSize !== null + ? Number(object.innodbPageSize) + : undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize !== undefined && + object.innodbOnlineAlterLogMaxSize !== null + ? Number(object.innodbOnlineAlterLogMaxSize) + : undefined; + message.innodbFtMinTokenSize = + object.innodbFtMinTokenSize !== undefined && + object.innodbFtMinTokenSize !== null + ? Number(object.innodbFtMinTokenSize) + : undefined; + message.innodbFtMaxTokenSize = + object.innodbFtMaxTokenSize !== undefined && + object.innodbFtMaxTokenSize !== null + ? Number(object.innodbFtMaxTokenSize) + : undefined; + message.lowerCaseTableNames = + object.lowerCaseTableNames !== undefined && + object.lowerCaseTableNames !== null + ? 
Number(object.lowerCaseTableNames) + : undefined; return message; }, @@ -2125,6 +2276,18 @@ export const Mysqlconfig80 = { } else { obj.logSlowFilter = []; } + message.mdbPriorityChoiceMaxLag !== undefined && + (obj.mdbPriorityChoiceMaxLag = message.mdbPriorityChoiceMaxLag); + message.innodbPageSize !== undefined && + (obj.innodbPageSize = message.innodbPageSize); + message.innodbOnlineAlterLogMaxSize !== undefined && + (obj.innodbOnlineAlterLogMaxSize = message.innodbOnlineAlterLogMaxSize); + message.innodbFtMinTokenSize !== undefined && + (obj.innodbFtMinTokenSize = message.innodbFtMinTokenSize); + message.innodbFtMaxTokenSize !== undefined && + (obj.innodbFtMaxTokenSize = message.innodbFtMaxTokenSize); + message.lowerCaseTableNames !== undefined && + (obj.lowerCaseTableNames = message.lowerCaseTableNames); return obj; }, @@ -2209,6 +2372,14 @@ export const Mysqlconfig80 = { message.logSlowRateLimit = object.logSlowRateLimit ?? undefined; message.logSlowSpStatements = object.logSlowSpStatements ?? undefined; message.logSlowFilter = object.logSlowFilter?.map((e) => e) || []; + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag ?? undefined; + message.innodbPageSize = object.innodbPageSize ?? undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize ?? undefined; + message.innodbFtMinTokenSize = object.innodbFtMinTokenSize ?? undefined; + message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; + message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/database.ts b/src/generated/yandex/cloud/mdb/mysql/v1/database.ts index fbeb4d29..a3d30c8f 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/database.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/database.ts @@ -6,20 +6,21 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; /** - * A MySQL database. For more information, see - * the [documentation](/docs/managed-mysql/concepts). + * An object that represents MySQL database. + * + * See [the documentation](/docs/managed-mysql/operations/databases) for details. */ export interface Database { $type: "yandex.cloud.mdb.mysql.v1.Database"; /** Name of the database. */ name: string; - /** ID of the MySQL cluster that the database belongs to. */ + /** ID of the cluster that the database belongs to. */ clusterId: string; } export interface DatabaseSpec { $type: "yandex.cloud.mdb.mysql.v1.DatabaseSpec"; - /** Name of the MySQL database. */ + /** Name of the database. */ name: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts index 34f0d622..c9af7fa0 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts @@ -25,13 +25,15 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetDatabaseRequest { $type: "yandex.cloud.mdb.mysql.v1.GetDatabaseRequest"; /** - * ID of the MySQL cluster that the database belongs to. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster that the database belongs to. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the MySQL database to return. - * To get the name of the database use a [DatabaseService.List] request. + * Name of the database to return information about. 
+ * + * To get this name, make a [DatabaseService.List] request. */ databaseName: string; } @@ -39,32 +41,35 @@ export interface GetDatabaseRequest { export interface ListDatabasesRequest { $type: "yandex.cloud.mdb.mysql.v1.ListDatabasesRequest"; /** - * ID of the MySQL cluster to list databases in. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to list databases in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListDatabasesResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListDatabasesResponse.next_page_token] that can be used to get the next page of results in the subsequent [DatabaseService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, Set [page_token] to the [ListDatabasesResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListDatabasesResponse.next_page_token] returned by the previous [DatabaseService.List] request. */ pageToken: string; } export interface ListDatabasesResponse { $type: "yandex.cloud.mdb.mysql.v1.ListDatabasesResponse"; - /** List of MySQL databases. */ + /** List of databases. */ databases: Database[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value - * for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value for the [ListDatabasesRequest.page_token] in the subsequent [DatabaseService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [DatabaseService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -72,41 +77,44 @@ export interface ListDatabasesResponse { export interface CreateDatabaseRequest { $type: "yandex.cloud.mdb.mysql.v1.CreateDatabaseRequest"; /** - * ID of the MySQL cluster to create a database in. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to create the database in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Configuration of the database to create. */ + /** Configuration of the database. */ databaseSpec?: DatabaseSpec; } export interface CreateDatabaseMetadata { $type: "yandex.cloud.mdb.mysql.v1.CreateDatabaseMetadata"; - /** ID of the MySQL cluster where a database is being created. */ + /** ID of the cluster the database is being created in. */ clusterId: string; - /** Name of the MySQL database that is being created. */ + /** Name of the database that is being created. 
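// A minimal sketch of the page_size / page_token contract described above: keep
// calling DatabaseService.List, feeding next_page_token back in, until the token
// comes back empty. It assumes a ready-made DatabaseServiceClient (for example,
// one produced by the SDK's Session); client construction and error handling are
// omitted, and the import paths follow this repository's layout. The same
// pattern applies to the other List methods touched by this patch
// (ResourcePresetService.List, UserService.List).
import { Database } from './src/generated/yandex/cloud/mdb/mysql/v1/database';
import {
  DatabaseServiceClient,
  ListDatabasesRequest,
  ListDatabasesResponse,
} from './src/generated/yandex/cloud/mdb/mysql/v1/database_service';

function listPage(
  client: DatabaseServiceClient,
  request: ListDatabasesRequest
): Promise<ListDatabasesResponse> {
  // The generated client is callback-based; wrap a single call in a Promise.
  return new Promise((resolve, reject) => {
    client.list(request, (err, response) => (err ? reject(err) : resolve(response)));
  });
}

async function listAllDatabases(
  client: DatabaseServiceClient,
  clusterId: string
): Promise<Database[]> {
  const databases: Database[] = [];
  let pageToken = '';
  do {
    const response = await listPage(client, {
      $type: 'yandex.cloud.mdb.mysql.v1.ListDatabasesRequest',
      clusterId,
      pageSize: 100,
      pageToken,
    });
    databases.push(...response.databases);
    pageToken = response.nextPageToken;
  } while (pageToken !== '');
  return databases;
}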
*/ databaseName: string; } export interface DeleteDatabaseRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteDatabaseRequest"; /** - * ID of the MySQL cluster to delete a database in. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete the database from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** * Name of the database to delete. - * To get the name of the database, use a [DatabaseService.List] request. + * + * To get this name, make a [DatabaseService.List] request. */ databaseName: string; } export interface DeleteDatabaseMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteDatabaseMetadata"; - /** ID of the MySQL cluster where a database is being deleted. */ + /** ID of the cluster the database is being deleted from. */ clusterId: string; - /** Name of the MySQL database that is being deleted. */ + /** Name of the database that is being deleted. */ databaseName: string; } @@ -686,13 +694,13 @@ export const DeleteDatabaseMetadata = { messageTypeRegistry.set(DeleteDatabaseMetadata.$type, DeleteDatabaseMetadata); -/** A set of methods for managing MySQL databases. */ +/** + * A set of methods for managing MySQL databases in a cluster. + * + * See [the documentation](/docs/managed-mysql/operations/databases) for details. + */ export const DatabaseServiceService = { - /** - * Returns the specified MySQL database. - * - * To get the list of available MySQL databases, make a [List] request. - */ + /** Retrieves information about the specified database. */ get: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/Get", requestStream: false, @@ -704,7 +712,7 @@ export const DatabaseServiceService = { Buffer.from(Database.encode(value).finish()), responseDeserialize: (value: Buffer) => Database.decode(value), }, - /** Retrieves the list of MySQL databases in the specified cluster. */ + /** Retrieves the list of databases in a cluster. */ list: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/List", requestStream: false, @@ -716,7 +724,7 @@ export const DatabaseServiceService = { Buffer.from(ListDatabasesResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListDatabasesResponse.decode(value), }, - /** Creates a new MySQL database in the specified cluster. */ + /** Creates a new database in a cluster. */ create: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/Create", requestStream: false, @@ -728,7 +736,7 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified MySQL database. */ + /** Deletes a database from a cluster. */ delete: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/Delete", requestStream: false, @@ -743,26 +751,18 @@ export const DatabaseServiceService = { } as const; export interface DatabaseServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified MySQL database. - * - * To get the list of available MySQL databases, make a [List] request. - */ + /** Retrieves information about the specified database. */ get: handleUnaryCall; - /** Retrieves the list of MySQL databases in the specified cluster. */ + /** Retrieves the list of databases in a cluster. */ list: handleUnaryCall; - /** Creates a new MySQL database in the specified cluster. */ + /** Creates a new database in a cluster. */ create: handleUnaryCall; - /** Deletes the specified MySQL database. */ + /** Deletes a database from a cluster. 
*/ delete: handleUnaryCall; } export interface DatabaseServiceClient extends Client { - /** - * Returns the specified MySQL database. - * - * To get the list of available MySQL databases, make a [List] request. - */ + /** Retrieves information about the specified database. */ get( request: GetDatabaseRequest, callback: (error: ServiceError | null, response: Database) => void @@ -778,7 +778,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Database) => void ): ClientUnaryCall; - /** Retrieves the list of MySQL databases in the specified cluster. */ + /** Retrieves the list of databases in a cluster. */ list( request: ListDatabasesRequest, callback: ( @@ -803,7 +803,7 @@ export interface DatabaseServiceClient extends Client { response: ListDatabasesResponse ) => void ): ClientUnaryCall; - /** Creates a new MySQL database in the specified cluster. */ + /** Creates a new database in a cluster. */ create( request: CreateDatabaseRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -819,7 +819,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified MySQL database. */ + /** Deletes a database from a cluster. */ delete( request: DeleteDatabaseRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts b/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts index 157bc229..6be4bc39 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; -/** A maintenance window settings. */ +/** Configuration of a maintenance window in a MySQL cluster. */ export interface MaintenanceWindow { $type: "yandex.cloud.mdb.mysql.v1.MaintenanceWindow"; /** Maintenance operation can be scheduled anytime. */ diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts index f18b20d2..dfbbd118 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts @@ -5,10 +5,15 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; -/** A preset of resources for hardware configuration of MySQL hosts. */ +/** + * An object that represents MySQL resource preset. + * A resource preset defines hardware configuration for cluster hosts. + * + * See [the documentation](/docs/managed-mysql/concepts/instance-types) for details. + */ export interface ResourcePreset { $type: "yandex.cloud.mdb.mysql.v1.ResourcePreset"; - /** ID of the resource preset. */ + /** ID of the resource preset that defines available computational resources (vCPU, RAM, etc.) for a cluster host. */ id: string; /** IDs of availability zones where the resource preset is available. 
*/ zoneIds: string[]; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts index d755fb2b..e6842533 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts @@ -21,8 +21,9 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetResourcePresetRequest { $type: "yandex.cloud.mdb.mysql.v1.GetResourcePresetRequest"; /** - * ID of the resource preset to return. - * To get the resource preset ID, use a [ResourcePresetService.List] request. + * ID of the resource preset to return information about. + * + * To get this ID, make a [ResourcePresetService.List] request. */ resourcePresetId: string; } @@ -30,14 +31,15 @@ export interface GetResourcePresetRequest { export interface ListResourcePresetsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListResourcePresetsRequest"; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListResourcePresetsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ResourcePresetService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] returned by the previous [ResourcePresetService.List] request. */ pageToken: string; } @@ -47,10 +49,11 @@ export interface ListResourcePresetsResponse { /** List of resource presets. */ resourcePresets: ResourcePreset[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value - * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value for the [ListResourcePresetsRequest.page_token] in the subsequent [ResourcePresetService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [ResourcePresetService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -314,13 +317,13 @@ messageTypeRegistry.set( ListResourcePresetsResponse ); -/** A set of methods for managing resource presets. */ +/** + * A set of methods for managing MySQL resource presets. + * + * See [the documentation](/docs/managed-mysql/concepts/instance-types) for details. + */ export const ResourcePresetServiceService = { - /** - * Returns the specified resource preset. 
- * - * To get the list of available resource presets, make a [List] request. - */ + /** Retrieves information about a resource preset. */ get: { path: "/yandex.cloud.mdb.mysql.v1.ResourcePresetService/Get", requestStream: false, @@ -351,11 +354,7 @@ export const ResourcePresetServiceService = { export interface ResourcePresetServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified resource preset. - * - * To get the list of available resource presets, make a [List] request. - */ + /** Retrieves information about a resource preset. */ get: handleUnaryCall; /** Retrieves the list of available resource presets. */ list: handleUnaryCall< @@ -365,11 +364,7 @@ export interface ResourcePresetServiceServer } export interface ResourcePresetServiceClient extends Client { - /** - * Returns the specified resource preset. - * - * To get the list of available resource presets, make a [List] request. - */ + /** Retrieves information about a resource preset. */ get( request: GetResourcePresetRequest, callback: (error: ServiceError | null, response: ResourcePreset) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/user.ts b/src/generated/yandex/cloud/mdb/mysql/v1/user.ts index 4710ac0a..54827507 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/user.ts @@ -8,18 +8,18 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export enum GlobalPermission { GLOBAL_PERMISSION_UNSPECIFIED = 0, - /** REPLICATION_CLIENT - Enables use of the SHOW MASTER STATUS, SHOW SLAVE STATUS, and SHOW BINARY LOGS statements. */ + /** REPLICATION_CLIENT - Enables use of the `SHOW MASTER STATUS`, `SHOW SLAVE STATUS`, and `SHOW BINARY LOGS` statements. */ REPLICATION_CLIENT = 1, /** * REPLICATION_SLAVE - Enables the account to request updates that have been made to databases on the master server, - * using the SHOW SLAVE HOSTS, SHOW RELAYLOG EVENTS, and SHOW BINLOG EVENTS statements. + * using the `SHOW SLAVE HOSTS`, `SHOW RELAYLOG EVENTS` and `SHOW BINLOG EVENTS` statements. */ REPLICATION_SLAVE = 2, /** - * PROCESS - Enables display of information about the threads executing within the server - * (that is, information about the statements being executed by sessions). - * The privilege enables use of SHOW PROCESSLIST or mysqladmin processlist to see threads belonging - * to other accounts; you can always see your own threads. The PROCESS privilege also enables use of SHOW ENGINE. + * PROCESS - Enables display of information about the the statements currently being performed by sessions (the set of threads executing within the server). + * + * The privilege enables use of `SHOW PROCESSLIST` or `mysqladmin` processlist to see threads belonging to other users. + * You can always see your own threads. The `PROCESS` privilege also enables use of `SHOW ENGINE`. */ PROCESS = 3, UNRECOGNIZED = -1, @@ -109,14 +109,15 @@ export function authPluginToJSON(object: AuthPlugin): string { } /** - * A MySQL user. For more information, see - * the [documentation](/docs/managed-mysql/concepts). + * An object that represents MySQL user. + * + * See [the documentation](/docs/managed-mysql/operations/cluster-users) for details. */ export interface User { $type: "yandex.cloud.mdb.mysql.v1.User"; - /** Name of the MySQL user. */ + /** Name of the user. */ name: string; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user belongs to. */ clusterId: string; /** Set of permissions granted to the user. 
*/ permissions: Permission[]; @@ -132,7 +133,11 @@ export interface Permission { $type: "yandex.cloud.mdb.mysql.v1.Permission"; /** Name of the database that the permission grants access to. */ databaseName: string; - /** Roles granted to the user within the database. */ + /** + * Roles granted to the user within the database. + * + * See [the documentation](/docs/managed-mysql/operations/grant) for details. + */ roles: Permission_Privilege[]; } @@ -142,7 +147,7 @@ export enum Permission_Privilege { ALL_PRIVILEGES = 1, /** ALTER - Altering tables. */ ALTER = 2, - /** ALTER_ROUTINE - Altering stored routines (stored procedures and functions). */ + /** ALTER_ROUTINE - Altering stored routines and functions. */ ALTER_ROUTINE = 3, /** CREATE - Creating tables or indexes. */ CREATE = 4, @@ -164,17 +169,17 @@ export enum Permission_Privilege { INDEX = 12, /** INSERT - Inserting rows into the database. */ INSERT = 13, - /** LOCK_TABLES - Using LOCK TABLES statement for tables available with SELECT privilege. */ + /** LOCK_TABLES - Using `LOCK TABLES` statement for tables available with `SELECT` privilege. */ LOCK_TABLES = 14, /** * SELECT - Selecting rows from tables. * - * Some SELECT statements can be allowed without the SELECT privilege. All - * statements that read column values require the SELECT privilege. See - * details in [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select). + * Some `SELECT` statements can be allowed without the `SELECT` privilege. All statements that read column values require the `SELECT` privilege. + * + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_select) for details. */ SELECT = 15, - /** SHOW_VIEW - Using the SHOW CREATE VIEW statement. Also needed for views used with EXPLAIN. */ + /** SHOW_VIEW - Using the `SHOW CREATE VIEW` statement. Also needed for views used with `EXPLAIN`. */ SHOW_VIEW = 16, /** TRIGGER - Creating, removing, executing, or displaying triggers for a table. */ TRIGGER = 17, @@ -319,11 +324,16 @@ export interface ConnectionLimits { export interface UserSpec { $type: "yandex.cloud.mdb.mysql.v1.UserSpec"; - /** Name of the MySQL user. */ + /** Name of the user. */ name: string; - /** Password of the MySQL user. */ + /** Password of the user. */ password: string; - /** Set of permissions to grant to the user. */ + /** + * Set of permissions granted to the user to access specific databases. + * One permission per database. + * + * When a permission for a database is set, the user will have access to the database. + */ permissions: Permission[]; /** Set of global permissions to grant to the user. */ globalPermissions: GlobalPermission[]; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts index a2f93715..435312c7 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts @@ -33,41 +33,52 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetUserRequest { $type: "yandex.cloud.mdb.mysql.v1.GetUserRequest"; - /** ID of the MySQL cluster. */ + /** + * ID of the cluster the user belongs to. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** Required. */ + /** + * Name of the user to return information about. + * + * To get this name, make a [UserService.List] request. 
+ */ userName: string; } export interface ListUsersRequest { $type: "yandex.cloud.mdb.mysql.v1.ListUsersRequest"; /** - * ID of the cluster to list MySQL users in. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to list the users in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListUsersResponse.next_page_token] that can be used to get the next page of results in the subsequent [UserService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by the previous [UserService.List] request. */ pageToken: string; } export interface ListUsersResponse { $type: "yandex.cloud.mdb.mysql.v1.ListUsersResponse"; - /** Requested list of MySQL users. */ + /** List of users. */ users: User[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value - * for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value for the [ListUsersRequest.page_token] in the subsequent [UserService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [UserService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -75,17 +86,18 @@ export interface ListUsersResponse { export interface CreateUserRequest { $type: "yandex.cloud.mdb.mysql.v1.CreateUserRequest"; /** - * ID of the MySQL cluster to create a user for. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to create the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Properties of the user to be created. */ + /** Configuration of the user. */ userSpec?: UserSpec; } export interface CreateUserMetadata { $type: "yandex.cloud.mdb.mysql.v1.CreateUserMetadata"; - /** ID of the MySQL cluster the user is being created for. */ + /** ID of the cluster the user is being created in. */ clusterId: string; /** Name of the user that is being created. */ userName: string; @@ -94,20 +106,22 @@ export interface CreateUserMetadata { export interface UpdateUserRequest { $type: "yandex.cloud.mdb.mysql.v1.UpdateUserRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to update the user in. 
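// A rough sketch of assembling a CreateUserRequest with the UserSpec and
// Permission messages shown above: one Permission entry per database the user
// should be able to access, plus optional global permissions. fromPartial() is
// assumed to be generated for these messages (as it is for the other messages in
// this patch); the cluster ID, names and password are placeholders.
import { CreateUserRequest } from './src/generated/yandex/cloud/mdb/mysql/v1/user_service';
import {
  GlobalPermission,
  Permission_Privilege,
} from './src/generated/yandex/cloud/mdb/mysql/v1/user';

const createUserRequest = CreateUserRequest.fromPartial({
  clusterId: '<cluster-id>',
  userSpec: {
    name: 'app_user',
    password: '<strong-password>',
    // Grant SELECT and INSERT on a single database.
    permissions: [
      {
        databaseName: 'appdb',
        roles: [Permission_Privilege.SELECT, Permission_Privilege.INSERT],
      },
    ],
    globalPermissions: [GlobalPermission.PROCESS],
  },
});
// Passing this to UserServiceClient.create() yields an Operation that tracks the
// asynchronous creation.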
+ * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the user to be updated. - * To get the name of the user use a [UserService.List] request. + * Name of the user to update. + * + * To get this name, make a [UserService.List] request. */ userName: string; - /** Field mask that specifies which fields of the MySQL user should be updated. */ + /** Field mask that specifies which settings of the user should be updated. */ updateMask?: FieldMask; /** New password for the user. */ password: string; - /** New set of permissions for the user. */ + /** A new set of permissions that should be granted to the user. */ permissions: Permission[]; /** New set of global permissions to grant to the user. */ globalPermissions: GlobalPermission[]; @@ -119,7 +133,7 @@ export interface UpdateUserRequest { export interface UpdateUserMetadata { $type: "yandex.cloud.mdb.mysql.v1.UpdateUserMetadata"; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user is being updated in. */ clusterId: string; /** Name of the user that is being updated. */ userName: string; @@ -128,20 +142,22 @@ export interface UpdateUserMetadata { export interface DeleteUserRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteUserRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete the user from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** * Name of the user to delete. - * To get the name of the user, use a [UserService.List] request. + * + * To get this name, make a [UserService.List] request. */ userName: string; } export interface DeleteUserMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteUserMetadata"; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user is being deleted from. */ clusterId: string; /** Name of the user that is being deleted. */ userName: string; @@ -150,13 +166,15 @@ export interface DeleteUserMetadata { export interface GrantUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1.GrantUserPermissionRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to grant permission to the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the user to grant the permission to. - * To get the name of the user, use a [UserService.List] request. + * Name of the user to grant permission to. + * + * To get this name, make a [UserService.List] request. */ userName: string; /** Permission that should be granted to the specified user. */ @@ -165,10 +183,7 @@ export interface GrantUserPermissionRequest { export interface GrantUserPermissionMetadata { $type: "yandex.cloud.mdb.mysql.v1.GrantUserPermissionMetadata"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the cluster the user is being granted a permission in. */ clusterId: string; /** Name of the user that is being granted a permission. */ userName: string; @@ -177,22 +192,24 @@ export interface GrantUserPermissionMetadata { export interface RevokeUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1.RevokeUserPermissionRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. 
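// The UpdateUserRequest above takes a google.protobuf.FieldMask: only the fields
// named in updateMask.paths are applied, which is what makes partial updates
// such as a password change possible. A minimal sketch, assuming fromPartial()
// exists for UpdateUserRequest and that the generated FieldMask message exposes
// a string[] `paths` field (both follow the patterns visible elsewhere in this
// patch).
import { UpdateUserRequest } from './src/generated/yandex/cloud/mdb/mysql/v1/user_service';

const updatePasswordRequest = UpdateUserRequest.fromPartial({
  clusterId: '<cluster-id>',
  userName: 'app_user',
  updateMask: { paths: ['password'] }, // apply only the password field
  password: '<new-password>',
});
// UserServiceClient.update() with this request changes the password and leaves
// the user's permissions and other settings untouched.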
+ * ID of the cluster to revoke permission from the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the user to revoke a permission from. - * To get the name of the user, use a [UserService.List] request. + * Name of the user to revoke permission from. + * + * To get this name, make a [UserService.List] request. */ userName: string; - /** Permission that should be revoked from the specified user. */ + /** Permission that should be revoked from the user. */ permission?: Permission; } export interface RevokeUserPermissionMetadata { $type: "yandex.cloud.mdb.mysql.v1.RevokeUserPermissionMetadata"; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user is being revoked a permission in. */ clusterId: string; /** Name of the user whose permission is being revoked. */ userName: string; @@ -1397,13 +1414,13 @@ messageTypeRegistry.set( RevokeUserPermissionMetadata ); -/** A set of methods for managing MySQL users. */ +/** + * A set of methods for managing MySQL users. + * + * See [the documentation](/docs/managed-mysql/operations/cluster-users) for details. + */ export const UserServiceService = { - /** - * Returns the specified MySQL user. - * - * To get the list of available MySQL users, make a [List] request. - */ + /** Retrieves information about the specified user. */ get: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Get", requestStream: false, @@ -1415,7 +1432,7 @@ export const UserServiceService = { Buffer.from(User.encode(value).finish()), responseDeserialize: (value: Buffer) => User.decode(value), }, - /** Retrieves a list of MySQL users in the specified cluster. */ + /** Retrieves the list of users in a cluster. */ list: { path: "/yandex.cloud.mdb.mysql.v1.UserService/List", requestStream: false, @@ -1427,7 +1444,7 @@ export const UserServiceService = { Buffer.from(ListUsersResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListUsersResponse.decode(value), }, - /** Creates a MySQL user in the specified cluster. */ + /** Creates a user in a cluster. */ create: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Create", requestStream: false, @@ -1439,7 +1456,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Modifies the specified MySQL user. */ + /** Updates a user in a cluster. */ update: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Update", requestStream: false, @@ -1451,7 +1468,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified MySQL user. */ + /** Deletes a user in a cluster. */ delete: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Delete", requestStream: false, @@ -1463,7 +1480,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Grants a permission to the specified MySQL user. */ + /** Grants permission to access a database to a user in a cluster. */ grantPermission: { path: "/yandex.cloud.mdb.mysql.v1.UserService/GrantPermission", requestStream: false, @@ -1476,7 +1493,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Revokes a permission from the specified MySQL user. 
*/ + /** Revokes permission to access a database from a user in a cluster. */ revokePermission: { path: "/yandex.cloud.mdb.mysql.v1.UserService/RevokePermission", requestStream: false, @@ -1492,32 +1509,24 @@ export const UserServiceService = { } as const; export interface UserServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified MySQL user. - * - * To get the list of available MySQL users, make a [List] request. - */ + /** Retrieves information about the specified user. */ get: handleUnaryCall; - /** Retrieves a list of MySQL users in the specified cluster. */ + /** Retrieves the list of users in a cluster. */ list: handleUnaryCall; - /** Creates a MySQL user in the specified cluster. */ + /** Creates a user in a cluster. */ create: handleUnaryCall; - /** Modifies the specified MySQL user. */ + /** Updates a user in a cluster. */ update: handleUnaryCall; - /** Deletes the specified MySQL user. */ + /** Deletes a user in a cluster. */ delete: handleUnaryCall; - /** Grants a permission to the specified MySQL user. */ + /** Grants permission to access a database to a user in a cluster. */ grantPermission: handleUnaryCall; - /** Revokes a permission from the specified MySQL user. */ + /** Revokes permission to access a database from a user in a cluster. */ revokePermission: handleUnaryCall; } export interface UserServiceClient extends Client { - /** - * Returns the specified MySQL user. - * - * To get the list of available MySQL users, make a [List] request. - */ + /** Retrieves information about the specified user. */ get( request: GetUserRequest, callback: (error: ServiceError | null, response: User) => void @@ -1533,7 +1542,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: User) => void ): ClientUnaryCall; - /** Retrieves a list of MySQL users in the specified cluster. */ + /** Retrieves the list of users in a cluster. */ list( request: ListUsersRequest, callback: (error: ServiceError | null, response: ListUsersResponse) => void @@ -1549,7 +1558,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: ListUsersResponse) => void ): ClientUnaryCall; - /** Creates a MySQL user in the specified cluster. */ + /** Creates a user in a cluster. */ create( request: CreateUserRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1565,7 +1574,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Modifies the specified MySQL user. */ + /** Updates a user in a cluster. */ update( request: UpdateUserRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1581,7 +1590,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified MySQL user. */ + /** Deletes a user in a cluster. */ delete( request: DeleteUserRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1597,7 +1606,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Grants a permission to the specified MySQL user. */ + /** Grants permission to access a database to a user in a cluster. 
*/ grantPermission( request: GrantUserPermissionRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1613,7 +1622,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Revokes a permission from the specified MySQL user. */ + /** Revokes permission to access a database from a user in a cluster. */ revokePermission( request: RevokeUserPermissionRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts index b6ba7546..1e895864 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts @@ -253,7 +253,7 @@ export interface Host { * Name of the MySQL host. The host name is assigned by Managed Service for MySQL * at creation time, and cannot be changed. 1-63 characters long. * - * The name is unique across all existing database hosts in Yandex.Cloud, + * The name is unique across all existing database hosts in Yandex Cloud, * as it defines the FQDN of the host. */ name: string; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts index 9f87696a..a6956fc3 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts @@ -129,7 +129,7 @@ export interface UpdateClusterRequest { * To get the MySQL cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the MySQL cluster should be updated. */ + /** Field mask that specifies which settings of the MySQL cluster should be updated. */ updateMask?: FieldMask; /** New description of the MySQL cluster. */ description: string; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts index 8032d8fc..3f807261 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts @@ -5,10 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1alpha"; -/** - * A MySQL database. For more information, see - * the [documentation](/docs/managed-mysql/concepts). - */ +/** A MySQL database. For more information, see the [documentation](/docs/managed-mysql/concepts). */ export interface Database { $type: "yandex.cloud.mdb.mysql.v1alpha.Database"; /** Name of the database. */ diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts index c1f01574..f78b5020 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts @@ -5,10 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1alpha"; -/** - * A MySQL user. For more information, see - * the [documentation](/docs/managed-mysql/concepts). - */ +/** A MySQL user. For more information, see the [documentation](/docs/managed-mysql/concepts). */ export interface User { $type: "yandex.cloud.mdb.mysql.v1alpha.User"; /** Name of the MySQL user. */ @@ -60,9 +57,7 @@ export enum Permission_Privilege { /** * SELECT - Selecting rows from tables. 
* - * Some SELECT statements can be allowed without the SELECT privilege. All - * statements that read column values require the SELECT privilege. See - * details in [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select). + * Some SELECT statements can be allowed without the SELECT privilege. All statements that read column values require the SELECT privilege. See details in [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select). */ SELECT = 15, /** SHOW_VIEW - Using the SHOW CREATE VIEW statement. Also needed for views used with EXPLAIN. */ diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts index 0f5cba20..b47e33b9 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts @@ -34,21 +34,11 @@ export interface GetUserRequest { export interface ListUsersRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.ListUsersRequest"; - /** - * ID of the cluster to list MySQL users in. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the cluster to list MySQL users in. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. - */ + /** The maximum number of results per page to return. If the number of available results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by a previous list request. */ pageToken: string; } @@ -56,21 +46,13 @@ export interface ListUsersResponse { $type: "yandex.cloud.mdb.mysql.v1alpha.ListUsersResponse"; /** Requested list of MySQL users. */ users: User[]; - /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value - * for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. - */ + /** This token allows you to get the next page of results for list requests. If the number of results is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent list request will have its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } export interface CreateUserRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.CreateUserRequest"; - /** - * ID of the MySQL cluster to create a user for. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster to create a user for. To get the cluster ID, use a [ClusterService.List] request. 
*/ clusterId: string; /** Properties of the user to be created. */ userSpec?: UserSpec; @@ -86,17 +68,11 @@ export interface CreateUserMetadata { export interface UpdateUserRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.UpdateUserRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to be updated. - * To get the name of the user use a [UserService.List] request. - */ + /** Name of the user to be updated. To get the name of the user, use a [UserService.List] request. */ userName: string; - /** Field mask that specifies which fields of the MySQL user should be updated. */ + /** Field mask that specifies which settings of the MySQL user should be updated. */ updateMask?: FieldMask; /** New password for the user. */ password: string; @@ -108,21 +84,15 @@ export interface UpdateUserMetadata { $type: "yandex.cloud.mdb.mysql.v1alpha.UpdateUserMetadata"; /** ID of the MySQL cluster the user belongs to. */ clusterId: string; - /** Name of the user that is being updated. */ + /** Name of a user that is being updated. */ userName: string; } export interface DeleteUserRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.DeleteUserRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to delete. - * To get the name of the user, use a [UserService.List] request. - */ + /** Name of the user to delete. To get the name of the user, use a [UserService.List] request. */ userName: string; } @@ -136,15 +106,9 @@ export interface DeleteUserMetadata { export interface GrantUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.GrantUserPermissionRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to grant the permission to. - * To get the name of the user, use a [UserService.List] request. - */ + /** Name of the user to grant the permission to. To get the name of the user, use a [UserService.List] request. */ userName: string; /** Permission that should be granted to the specified user. */ permission?: Permission; @@ -152,10 +116,7 @@ export interface GrantUserPermissionRequest { export interface GrantUserPermissionMetadata { $type: "yandex.cloud.mdb.mysql.v1alpha.GrantUserPermissionMetadata"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; /** Name of the user that is being granted a permission. */ userName: string; @@ -163,15 +124,9 @@ export interface GrantUserPermissionMetadata { export interface RevokeUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.RevokeUserPermissionRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. 
To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to revoke a permission from. - * To get the name of the user, use a [UserService.List] request. - */ + /** Name of the user to revoke a permission from. To get the name of the user, use a [UserService.List] request. */ userName: string; /** Name of the database that the user should lose access to. */ databaseName: string; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts index b1d29d7f..93be9eff 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts @@ -16,6 +16,7 @@ import { Postgresqlconfigset111c } from "../../../../../yandex/cloud/mdb/postgre import { PostgresqlConfigSet12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12"; import { Postgresqlconfigset121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c"; import { PostgresqlConfigSet13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13"; +import { PostgresqlConfigSet14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -24,6 +25,7 @@ import { Postgresqlhostconfig111c } from "../../../../../yandex/cloud/mdb/postgr import { PostgresqlHostConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12"; import { Postgresqlhostconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12_1c"; import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13"; +import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -284,6 +286,8 @@ export interface ClusterConfig { postgresqlConfig121c?: Postgresqlconfigset121c | undefined; /** Configuration of a PostgreSQL 13 server. */ postgresqlConfig13?: PostgresqlConfigSet13 | undefined; + /** Configuration of a PostgreSQL 14 server. */ + postgresqlConfig14?: PostgresqlConfigSet14 | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -368,7 +372,7 @@ export interface Host { * Name of the PostgreSQL host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the PostgreSQL host. The ID is assigned by MDB at creation time. */ @@ -555,6 +559,8 @@ export interface HostConfig { postgresqlConfig121c?: Postgresqlhostconfig121c | undefined; /** Configuration for a host with PostgreSQL 13 server deployed. */ postgresqlConfig13?: PostgresqlHostConfig13 | undefined; + /** Configuration for a host with PostgreSQL 14 server deployed. 
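// With PostgreSQL 14 support added here, ClusterConfig (and HostConfig) gain one
// more version-specific optional field alongside postgresqlConfig13 and the
// older ones; at most one of them is expected to be populated on a cluster
// returned by the API. Code that switches on these fields needs an extra branch,
// roughly as in this sketch (import path per this repository's layout):
import { ClusterConfig } from './src/generated/yandex/cloud/mdb/postgresql/v1/cluster';

function describePostgresqlConfig(config: ClusterConfig): string {
  if (config.postgresqlConfig14 !== undefined) {
    return 'PostgreSQL 14 server configuration';
  }
  if (config.postgresqlConfig13 !== undefined) {
    return 'PostgreSQL 13 server configuration';
  }
  return 'configuration for an older PostgreSQL version';
}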
*/ + postgresqlConfig14?: PostgresqlHostConfig14 | undefined; } export interface Service { @@ -670,13 +676,15 @@ export interface Access { /** Allow access for DataLens */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex.Cloud management console. + * Allow SQL queries to the cluster databases from the Yandex Cloud management console. * * See [SQL queries in the management console](/docs/managed-postgresql/operations/web-sql-query) for more details. */ webSql: boolean; /** Allow access for Serverless */ serverless: boolean; + /** Allow access for DataTransfer. */ + dataTransfer: boolean; } export interface PerformanceDiagnostics { @@ -1241,6 +1249,12 @@ export const ClusterConfig = { writer.uint32(122).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlConfigSet14.encode( + message.postgresqlConfig14, + writer.uint32(130).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -1341,6 +1355,12 @@ export const ClusterConfig = { reader.uint32() ); break; + case 16: + message.postgresqlConfig14 = PostgresqlConfigSet14.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -1428,6 +1448,11 @@ export const ClusterConfig = { object.postgresqlConfig_13 !== null ? PostgresqlConfigSet13.fromJSON(object.postgresqlConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig_14 !== undefined && + object.postgresqlConfig_14 !== null + ? PostgresqlConfigSet14.fromJSON(object.postgresqlConfig_14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -1497,6 +1522,10 @@ export const ClusterConfig = { (obj.postgresqlConfig_13 = message.postgresqlConfig13 ? PostgresqlConfigSet13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlConfig_14 = message.postgresqlConfig14 + ? PostgresqlConfigSet14.toJSON(message.postgresqlConfig14) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -1567,6 +1596,11 @@ export const ClusterConfig = { object.postgresqlConfig13 !== null ? PostgresqlConfigSet13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? PostgresqlConfigSet14.fromPartial(object.postgresqlConfig14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -1976,6 +2010,12 @@ export const HostConfig = { writer.uint32(66).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlHostConfig14.encode( + message.postgresqlConfig14, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -2034,6 +2074,12 @@ export const HostConfig = { reader.uint32() ); break; + case 9: + message.postgresqlConfig14 = PostgresqlHostConfig14.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -2084,6 +2130,11 @@ export const HostConfig = { object.postgresqlHostConfig_13 !== null ? 
PostgresqlHostConfig13.fromJSON(object.postgresqlHostConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlHostConfig_14 !== undefined && + object.postgresqlHostConfig_14 !== null + ? PostgresqlHostConfig14.fromJSON(object.postgresqlHostConfig_14) + : undefined; return message; }, @@ -2121,6 +2172,10 @@ export const HostConfig = { (obj.postgresqlHostConfig_13 = message.postgresqlConfig13 ? PostgresqlHostConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlHostConfig_14 = message.postgresqlConfig14 + ? PostgresqlHostConfig14.toJSON(message.postgresqlConfig14) + : undefined); return obj; }, @@ -2168,6 +2223,11 @@ export const HostConfig = { object.postgresqlConfig13 !== null ? PostgresqlHostConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? PostgresqlHostConfig14.fromPartial(object.postgresqlConfig14) + : undefined; return message; }, }; @@ -2343,6 +2403,7 @@ const baseAccess: object = { dataLens: false, webSql: false, serverless: false, + dataTransfer: false, }; export const Access = { @@ -2361,6 +2422,9 @@ export const Access = { if (message.serverless === true) { writer.uint32(24).bool(message.serverless); } + if (message.dataTransfer === true) { + writer.uint32(32).bool(message.dataTransfer); + } return writer; }, @@ -2380,6 +2444,9 @@ export const Access = { case 3: message.serverless = reader.bool(); break; + case 4: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -2402,6 +2469,10 @@ export const Access = { object.serverless !== undefined && object.serverless !== null ? Boolean(object.serverless) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, @@ -2410,6 +2481,8 @@ export const Access = { message.dataLens !== undefined && (obj.dataLens = message.dataLens); message.webSql !== undefined && (obj.webSql = message.webSql); message.serverless !== undefined && (obj.serverless = message.serverless); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, @@ -2418,6 +2491,7 @@ export const Access = { message.dataLens = object.dataLens ?? false; message.webSql = object.webSql ?? false; message.serverless = object.serverless ?? false; + message.dataTransfer = object.dataTransfer ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts index 41132d64..886f947b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts @@ -43,6 +43,7 @@ import { Postgresqlconfig111c } from "../../../../../yandex/cloud/mdb/postgresql import { PostgresqlConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12"; import { Postgresqlconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c"; import { PostgresqlConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13"; +import { PostgresqlConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -51,6 +52,7 @@ import { Postgresqlhostconfig111c } from "../../../../../yandex/cloud/mdb/postgr import { PostgresqlHostConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12"; import { Postgresqlhostconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12_1c"; import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13"; +import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -841,8 +843,10 @@ export interface ConfigSpec { postgresqlConfig12?: PostgresqlConfig12 | undefined; /** Configuration for a PostgreSQL 12 1C cluster. */ postgresqlConfig121c?: Postgresqlconfig121c | undefined; - /** Configuration for a PostgreSQL 13 1C cluster. */ + /** Configuration for a PostgreSQL 13 cluster. */ postgresqlConfig13?: PostgresqlConfig13 | undefined; + /** Configuration for a PostgreSQL 14 cluster. */ + postgresqlConfig14?: PostgresqlConfig14 | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -877,6 +881,8 @@ export interface ConfigHostSpec { postgresqlConfig121c?: Postgresqlhostconfig121c | undefined; /** Configuration for a host with PostgreSQL 13 server deployed. */ postgresqlConfig13?: PostgresqlHostConfig13 | undefined; + /** Configuration for a host with PostgreSQL 14 server deployed. */ + postgresqlConfig14?: PostgresqlHostConfig14 | undefined; } const baseGetClusterRequest: object = { @@ -5537,6 +5543,12 @@ export const ConfigSpec = { writer.uint32(122).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlConfig14.encode( + message.postgresqlConfig14, + writer.uint32(130).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -5637,6 +5649,12 @@ export const ConfigSpec = { reader.uint32() ); break; + case 16: + message.postgresqlConfig14 = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -5724,6 +5742,11 @@ export const ConfigSpec = { object.postgresqlConfig_13 !== null ? 
PostgresqlConfig13.fromJSON(object.postgresqlConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig_14 !== undefined && + object.postgresqlConfig_14 !== null + ? PostgresqlConfig14.fromJSON(object.postgresqlConfig_14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -5793,6 +5816,10 @@ export const ConfigSpec = { (obj.postgresqlConfig_13 = message.postgresqlConfig13 ? PostgresqlConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlConfig_14 = message.postgresqlConfig14 + ? PostgresqlConfig14.toJSON(message.postgresqlConfig14) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -5863,6 +5890,11 @@ export const ConfigSpec = { object.postgresqlConfig13 !== null ? PostgresqlConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? PostgresqlConfig14.fromPartial(object.postgresqlConfig14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -5952,6 +5984,12 @@ export const ConfigHostSpec = { writer.uint32(66).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlHostConfig14.encode( + message.postgresqlConfig14, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -6010,6 +6048,12 @@ export const ConfigHostSpec = { reader.uint32() ); break; + case 9: + message.postgresqlConfig14 = PostgresqlHostConfig14.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -6060,6 +6104,11 @@ export const ConfigHostSpec = { object.postgresqlHostConfig_13 !== null ? PostgresqlHostConfig13.fromJSON(object.postgresqlHostConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlHostConfig_14 !== undefined && + object.postgresqlHostConfig_14 !== null + ? PostgresqlHostConfig14.fromJSON(object.postgresqlHostConfig_14) + : undefined; return message; }, @@ -6097,6 +6146,10 @@ export const ConfigHostSpec = { (obj.postgresqlHostConfig_13 = message.postgresqlConfig13 ? PostgresqlHostConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlHostConfig_14 = message.postgresqlConfig14 + ? PostgresqlHostConfig14.toJSON(message.postgresqlConfig14) + : undefined); return obj; }, @@ -6144,6 +6197,11 @@ export const ConfigHostSpec = { object.postgresqlConfig13 !== null ? PostgresqlHostConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? 
PostgresqlHostConfig14.fromPartial(object.postgresqlConfig14) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts new file mode 100644 index 00000000..72f96036 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts @@ -0,0 +1,2028 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * The options and structure of `PostgresqlHostConfig14` reflect the parameters of a PostgreSQL + * configuration file; a detailed description of each parameter is available in the + * [PostgreSQL documentation](https://www.postgresql.org/docs/14/runtime-config.html). + */ +export interface PostgresqlHostConfig14 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: PostgresqlHostConfig14_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: PostgresqlHostConfig14_ForceParallelMode; + clientMinMessages: PostgresqlHostConfig14_LogLevel; + logMinMessages: PostgresqlHostConfig14_LogLevel; + logMinErrorStatement: PostgresqlHostConfig14_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlHostConfig14_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlHostConfig14_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlHostConfig14_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlHostConfig14_ByteaOutput; + xmlbinary: PostgresqlHostConfig14_XmlBinary; + xmloption: PostgresqlHostConfig14_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlHostConfig14_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum PostgresqlHostConfig14_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_ConstraintExclusionFromJSON( + object: any +): PostgresqlHostConfig14_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_ConstraintExclusionToJSON( + object: PostgresqlHostConfig14_ConstraintExclusion +): string { + switch (object) { + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_ForceParallelModeFromJSON( + object: any +): PostgresqlHostConfig14_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + 
default: + return PostgresqlHostConfig14_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_ForceParallelModeToJSON( + object: PostgresqlHostConfig14_ForceParallelMode +): string { + switch (object) { + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogLevelFromJSON( + object: any +): PostgresqlHostConfig14_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_LogLevelToJSON( + object: PostgresqlHostConfig14_LogLevel +): string { + switch (object) { + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_FATAL: + return 
"LOG_LEVEL_FATAL"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig14_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig14_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogStatementFromJSON( + object: any +): PostgresqlHostConfig14_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_LogStatementToJSON( + object: PostgresqlHostConfig14_LogStatement +): string { + switch (object) { + case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + 
TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_TransactionIsolationFromJSON( + object: any +): PostgresqlHostConfig14_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_TransactionIsolationToJSON( + object: PostgresqlHostConfig14_TransactionIsolation +): string { + switch (object) { + case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_ByteaOutputFromJSON( + object: any +): PostgresqlHostConfig14_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_ByteaOutputToJSON( + object: PostgresqlHostConfig14_ByteaOutput +): string { + switch (object) { + case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_XmlBinaryFromJSON( + object: any +): PostgresqlHostConfig14_XmlBinary { + 
switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return PostgresqlHostConfig14_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return PostgresqlHostConfig14_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlHostConfig14_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_XmlBinaryToJSON( + object: PostgresqlHostConfig14_XmlBinary +): string { + switch (object) { + case PostgresqlHostConfig14_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlHostConfig14_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlHostConfig14_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_XmlOptionFromJSON( + object: any +): PostgresqlHostConfig14_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlHostConfig14_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlHostConfig14_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlHostConfig14_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_XmlOptionToJSON( + object: PostgresqlHostConfig14_XmlOption +): string { + switch (object) { + case PostgresqlHostConfig14_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlHostConfig14_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlHostConfig14_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_BackslashQuoteFromJSON( + object: any +): PostgresqlHostConfig14_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_BackslashQuoteToJSON( + object: PostgresqlHostConfig14_BackslashQuote +): string { + switch (object) { + case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case 
PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlHostConfig14: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14", + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const PostgresqlHostConfig14 = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14" as const, + + encode( + message: PostgresqlHostConfig14, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(104).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! 
}, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlHostConfig14 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlHostConfig14 } as PostgresqlHostConfig14; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.forceParallelMode = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlHostConfig14 { + const message = { ...basePostgresqlHostConfig14 } as PostgresqlHostConfig14; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlHostConfig14_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? 
postgresqlHostConfig14_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlHostConfig14_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlHostConfig14_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlHostConfig14_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlHostConfig14_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlHostConfig14_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlHostConfig14_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlHostConfig14_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlHostConfig14_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? 
postgresqlHostConfig14_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlHostConfig14_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? 
Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: PostgresqlHostConfig14): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlHostConfig14_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlHostConfig14_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlHostConfig14_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlHostConfig14_LogLevelToJSON( + message.logMinMessages + )); + 
message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlHostConfig14_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlHostConfig14_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlHostConfig14_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlHostConfig14_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlHostConfig14_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlHostConfig14_XmlBinaryToJSON( + message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlHostConfig14_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlHostConfig14_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + 
message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlHostConfig14 { + const message = { ...basePostgresqlHostConfig14 } as PostgresqlHostConfig14; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? 
undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? ""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlHostConfig14.$type, PostgresqlHostConfig14); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts index 5be6c6d8..5545a401 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts @@ -149,6 +149,11 @@ export interface PostgresqlConfig10 { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: PostgresqlConfig10_PgHintPlanDebugPrint; pgHintPlanMessageLevel: PostgresqlConfig10_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig10_WalLevel { @@ -1732,6 +1737,48 @@ export const PostgresqlConfig10 = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(896).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(938).fork() + ).ldelim(); + } return writer; }, @@ -2335,6 +2382,36 @@ export const PostgresqlConfig10 = { case 112: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 113: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2860,6 +2937,30 @@ export const PostgresqlConfig10 = { object.pgHintPlanMessageLevel !== null ? postgresqlConfig10_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3114,6 +3215,16 @@ export const PostgresqlConfig10 = { (obj.pgHintPlanMessageLevel = postgresqlConfig10_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3252,6 +3363,12 @@ export const PostgresqlConfig10 = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts index d4893bc1..430d62ab 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts @@ -151,6 +151,11 @@ export interface Postgresqlconfig101c { pgHintPlanMessageLevel: Postgresqlconfig101c_LogLevel; onlineAnalyzeEnable?: boolean; plantunerFixEmptyTable?: boolean; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum Postgresqlconfig101c_WalLevel { @@ -1752,6 +1757,48 @@ export const Postgresqlconfig101c = { writer.uint32(914).fork() ).ldelim(); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } return writer; }, @@ -2370,6 +2417,36 @@ export const Postgresqlconfig101c = { reader.uint32() ).value; break; + case 115: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2911,6 +2988,30 @@ export const Postgresqlconfig101c = { object.plantunerFixEmptyTable !== null ? Boolean(object.plantunerFixEmptyTable) : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3170,6 +3271,16 @@ export const Postgresqlconfig101c = { (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); message.plantunerFixEmptyTable !== undefined && (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3310,6 +3421,12 @@ export const Postgresqlconfig101c = { message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? 
undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts index 6bda4a9a..bda39416 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts @@ -157,6 +157,11 @@ export interface PostgresqlConfig11 { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: PostgresqlConfig11_PgHintPlanDebugPrint; pgHintPlanMessageLevel: PostgresqlConfig11_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig11_WalLevel { @@ -1809,6 +1814,48 @@ export const PostgresqlConfig11 = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(968).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(978).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(986).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } return writer; }, @@ -2457,6 +2504,36 @@ export const PostgresqlConfig11 = { case 121: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 122: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 123: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 124: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3021,6 +3098,30 @@ export const PostgresqlConfig11 = { object.pgHintPlanMessageLevel !== null ? postgresqlConfig11_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3292,6 +3393,16 @@ export const PostgresqlConfig11 = { (obj.pgHintPlanMessageLevel = postgresqlConfig11_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3443,6 +3554,12 @@ export const PostgresqlConfig11 = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts index 0c8473bb..f8e82ee5 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts @@ -157,6 +157,11 @@ export interface Postgresqlconfig111c { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: Postgresqlconfig111c_PgHintPlanDebugPrint; pgHintPlanMessageLevel: Postgresqlconfig111c_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum Postgresqlconfig111c_WalLevel { @@ -1809,6 +1814,48 @@ export const Postgresqlconfig111c = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(968).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(978).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(986).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } return writer; }, @@ -2460,6 +2507,36 @@ export const Postgresqlconfig111c = { case 121: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 122: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 123: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 124: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3030,6 +3107,30 @@ export const Postgresqlconfig111c = { object.pgHintPlanMessageLevel !== null ? postgresqlconfig111c_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3302,6 +3403,16 @@ export const Postgresqlconfig111c = { (obj.pgHintPlanMessageLevel = postgresqlconfig111c_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3453,6 +3564,12 @@ export const Postgresqlconfig111c = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? 
undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts index ba276b27..85503a0c 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts @@ -159,6 +159,11 @@ export interface PostgresqlConfig12 { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: PostgresqlConfig12_PgHintPlanDebugPrint; pgHintPlanMessageLevel: PostgresqlConfig12_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig12_WalLevel { @@ -1872,6 +1877,48 @@ export const PostgresqlConfig12 = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(984).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } return writer; }, @@ -2529,6 +2576,36 @@ export const PostgresqlConfig12 = { case 123: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 124: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3102,6 +3179,30 @@ export const PostgresqlConfig12 = { object.pgHintPlanMessageLevel !== null ? postgresqlConfig12_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3379,6 +3480,16 @@ export const PostgresqlConfig12 = { (obj.pgHintPlanMessageLevel = postgresqlConfig12_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3533,6 +3644,12 @@ export const PostgresqlConfig12 = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts index 52d346a5..d79a33ea 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts @@ -159,6 +159,11 @@ export interface Postgresqlconfig121c { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: Postgresqlconfig121c_PgHintPlanDebugPrint; pgHintPlanMessageLevel: Postgresqlconfig121c_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum Postgresqlconfig121c_WalLevel { @@ -1872,6 +1877,48 @@ export const Postgresqlconfig121c = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(984).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } return writer; }, @@ -2532,6 +2579,36 @@ export const Postgresqlconfig121c = { case 123: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 124: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3111,6 +3188,30 @@ export const Postgresqlconfig121c = { object.pgHintPlanMessageLevel !== null ? postgresqlconfig121c_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3389,6 +3490,16 @@ export const Postgresqlconfig121c = { (obj.pgHintPlanMessageLevel = postgresqlconfig121c_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3543,6 +3654,12 @@ export const Postgresqlconfig121c = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? 
undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts index d921ed9a..518a877b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts @@ -177,6 +177,11 @@ export interface PostgresqlConfig13 { logParameterMaxLength?: number; /** in bytes. */ logParameterMaxLengthOnError?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig13_WalLevel { @@ -1995,6 +2000,48 @@ export const PostgresqlConfig13 = { writer.uint32(1090).fork() ).ldelim(); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } return writer; }, @@ -2724,6 +2771,36 @@ export const PostgresqlConfig13 = { reader.uint32() ).value; break; + case 137: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3356,6 +3433,30 @@ export const PostgresqlConfig13 = { object.logParameterMaxLengthOnError !== null ? Number(object.logParameterMaxLengthOnError) : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? 
Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3659,6 +3760,16 @@ export const PostgresqlConfig13 = { (obj.logParameterMaxLength = message.logParameterMaxLength); message.logParameterMaxLengthOnError !== undefined && (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3829,6 +3940,12 @@ export const PostgresqlConfig13 = { message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; message.logParameterMaxLengthOnError = object.logParameterMaxLengthOnError ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts new file mode 100644 index 00000000..de6d6a7b --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts @@ -0,0 +1,4230 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface PostgresqlConfig14 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. */ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: PostgresqlConfig14_WalLevel; + synchronousCommit: PostgresqlConfig14_SynchronousCommit; + /** in milliseconds. 
*/ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: PostgresqlConfig14_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: PostgresqlConfig14_ForceParallelMode; + clientMinMessages: PostgresqlConfig14_LogLevel; + logMinMessages: PostgresqlConfig14_LogLevel; + logMinErrorStatement: PostgresqlConfig14_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlConfig14_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlConfig14_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlConfig14_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlConfig14_ByteaOutput; + xmlbinary: PostgresqlConfig14_XmlBinary; + xmloption: PostgresqlConfig14_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. */ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlConfig14_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: PostgresqlConfig14_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: PostgresqlConfig14_SharedPreloadLibraries[]; + /** in milliseconds. 
*/ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: PostgresqlConfig14_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: PostgresqlConfig14_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. */ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; +} + +export enum PostgresqlConfig14_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_WalLevelFromJSON( + object: any +): PostgresqlConfig14_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_WalLevelToJSON( + object: PostgresqlConfig14_WalLevel +): string { + switch (object) { + case PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_SynchronousCommitFromJSON( + object: any +): PostgresqlConfig14_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case 
"SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_SynchronousCommitToJSON( + object: PostgresqlConfig14_SynchronousCommit +): string { + switch (object) { + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_ConstraintExclusionFromJSON( + object: any +): PostgresqlConfig14_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_ConstraintExclusionToJSON( + object: PostgresqlConfig14_ConstraintExclusion +): string { + switch (object) { + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_ForceParallelModeFromJSON( + object: any +): PostgresqlConfig14_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; 
+ case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_ForceParallelModeToJSON( + object: PostgresqlConfig14_ForceParallelMode +): string { + switch (object) { + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogLevelFromJSON( + object: any +): PostgresqlConfig14_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogLevelToJSON( + object: PostgresqlConfig14_LogLevel +): string { + switch (object) { + case PostgresqlConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_FATAL: + 
return "LOG_LEVEL_FATAL"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig14_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogErrorVerbosityToJSON( + object: PostgresqlConfig14_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogStatementFromJSON( + object: any +): PostgresqlConfig14_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogStatementToJSON( + object: PostgresqlConfig14_LogStatement +): string { + switch (object) { + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + 
TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_TransactionIsolationFromJSON( + object: any +): PostgresqlConfig14_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_TransactionIsolationToJSON( + object: PostgresqlConfig14_TransactionIsolation +): string { + switch (object) { + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_ByteaOutputFromJSON( + object: any +): PostgresqlConfig14_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_ByteaOutputToJSON( + object: PostgresqlConfig14_ByteaOutput +): string { + switch (object) { + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_XmlBinaryFromJSON( + object: any +): PostgresqlConfig14_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return 
PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlConfig14_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_XmlBinaryToJSON( + object: PostgresqlConfig14_XmlBinary +): string { + switch (object) { + case PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig14_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_XmlOptionFromJSON( + object: any +): PostgresqlConfig14_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_XmlOptionToJSON( + object: PostgresqlConfig14_XmlOption +): string { + switch (object) { + case PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_BackslashQuoteFromJSON( + object: any +): PostgresqlConfig14_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_BackslashQuoteToJSON( + object: PostgresqlConfig14_BackslashQuote +): string { + switch (object) { + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, 
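// A minimal construction/round-trip sketch for the message defined above. It
// assumes PostgresqlConfig14 exposes the same fromPartial helper that this
// patch shows for PostgresqlConfig13; the import path is illustrative.
// Optional scalar settings are wrapper-typed (Int64Value/DoubleValue/BoolValue),
// so leaving a field undefined means "not set", while enum fields such as this
// one simply default to their *_UNSPECIFIED (0) member.
//
//   import {
//     PostgresqlConfig14,
//     PostgresqlConfig14_PlanCacheMode,
//   } from "./postgresql14";
//
//   const config = PostgresqlConfig14.fromPartial({
//     maxConnections: 200,        // Int64Value wrapper on the wire
//     pgQualstatsEnabled: true,   // one of the new pg_qualstats settings (BoolValue)
//     pgQualstatsSampleRate: 0.5, // DoubleValue wrapper
//     planCacheMode: PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO,
//   });
//
//   const bytes = PostgresqlConfig14.encode(config).finish();
//   const roundTripped = PostgresqlConfig14.decode(bytes);
//   // roundTripped.sharedBuffers === undefined: the field was never set.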
+ PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_PlanCacheModeFromJSON( + object: any +): PostgresqlConfig14_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_PlanCacheModeToJSON( + object: PostgresqlConfig14_PlanCacheMode +): string { + switch (object) { + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_PgHintPlanDebugPrintFromJSON( + object: any +): PostgresqlConfig14_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig14_PgHintPlanDebugPrint +): string { + switch (object) { + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum 
PostgresqlConfig14_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_SharedPreloadLibrariesFromJSON( + object: any +): PostgresqlConfig14_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig14_SharedPreloadLibraries +): string { + switch (object) { + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + default: + return "UNKNOWN"; + } +} + +export interface PostgresqlConfigSet14 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14"; + /** + * Effective settings for a PostgreSQL 14 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: PostgresqlConfig14; + /** User-defined settings for a PostgreSQL 14 cluster. */ + userConfig?: PostgresqlConfig14; + /** Default configuration for a PostgreSQL 14 cluster. 
*/ + defaultConfig?: PostgresqlConfig14; +} + +const basePostgresqlConfig14: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const PostgresqlConfig14 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14" as const, + + encode( + message: PostgresqlConfig14, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! 
}, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(264).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! 
}, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! }, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! }, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! 
}, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if (message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== 
undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! }, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if 
(message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + }, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! 
}, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! 
}, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PostgresqlConfig14 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePostgresqlConfig14 } as PostgresqlConfig14; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + 
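// A worked note on the tag arithmetic used throughout encode()/decode(), using
// only values already present in this file: a protobuf tag packs the field
// number and wire type as (fieldNumber << 3) | wireType, and decode() recovers
// the field number via `tag >>> 3`.
//
//   const tagFor = (fieldNumber: number, wireType: number) => (fieldNumber << 3) | wireType;
//   tagFor(33, 0);  // 264 -> forceParallelMode, bare enum written as a varint (writer.uint32(264) in encode)
//   tagFor(1, 2);   // 10  -> maxConnections, length-delimited Int64Value wrapper
//   tagFor(111, 2); // 890 -> sharedPreloadLibraries, packed repeated enum
//   264 >>> 3;      // 33, the field number handled by the case just below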
message.forceParallelMode = reader.int32() as any; + break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + 
reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfig14 { + const message = { ...basePostgresqlConfig14 } as PostgresqlConfig14; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? 
Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlConfig14_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlConfig14_SynchronousCommitFromJSON(object.synchronousCommit) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlConfig14_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlConfig14_ForceParallelModeFromJSON(object.forceParallelMode) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlConfig14_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? 
postgresqlConfig14_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlConfig14_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlConfig14_LogErrorVerbosityFromJSON(object.logErrorVerbosity) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlConfig14_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlConfig14_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlConfig14_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlConfig14_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlConfig14_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? 
Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlConfig14_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? 
Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? 
Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlConfig14_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlConfig14_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? 
Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlConfig14_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlConfig14_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? 
Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfig14): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined && + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlConfig14_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlConfig14_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + 
(obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlConfig14_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlConfig14_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlConfig14_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlConfig14_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlConfig14_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlConfig14_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlConfig14_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlConfig14_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlConfig14_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = 
postgresqlConfig14_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = postgresqlConfig14_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlConfig14_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && + (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + 
(obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlConfig14_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlConfig14_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== 
undefined && + (obj.pgHintPlanDebugPrint = postgresqlConfig14_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlConfig14_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + (obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + return obj; + }, + + fromPartial<I extends Exact<DeepPartial<PostgresqlConfig14>, I>>( + object: I + ): PostgresqlConfig14 { + const message = { ...basePostgresqlConfig14 } as PostgresqlConfig14; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ??
undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? 
undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? 
undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfig14.$type, PostgresqlConfig14); + +const basePostgresqlConfigSet14: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14", +}; + +export const PostgresqlConfigSet14 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14" as const, + + encode( + message: PostgresqlConfigSet14, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + PostgresqlConfig14.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + PostgresqlConfig14.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + PostgresqlConfig14.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlConfigSet14 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlConfigSet14 } as PostgresqlConfigSet14; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfigSet14 { + const message = { ...basePostgresqlConfigSet14 } as PostgresqlConfigSet14; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PostgresqlConfig14.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig14.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig14.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfigSet14): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? PostgresqlConfig14.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? PostgresqlConfig14.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? PostgresqlConfig14.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfigSet14 { + const message = { ...basePostgresqlConfigSet14 } as PostgresqlConfigSet14; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PostgresqlConfig14.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig14.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
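// Usage sketch: how a caller might combine the generated helpers above to build and
// round-trip a PostgreSQL 14 settings object. The import path and the chosen settings
// are illustrative assumptions; fromPartial fills every omitted field with its default
// (wrapper-typed settings -> undefined, enums -> 0, strings -> "").
import {
  PostgresqlConfig14,
  PostgresqlConfigSet14,
} from "./postgresql14"; // assumed: yandex/cloud/mdb/postgresql/v1/config/postgresql14

const configSet = PostgresqlConfigSet14.fromPartial({
  userConfig: { tempFileLimit: 4096, searchPath: "public" }, // illustrative values
});

// Encode to the protobuf wire format and decode it back.
const restoredSet = PostgresqlConfigSet14.decode(
  PostgresqlConfigSet14.encode(configSet).finish()
);
console.log(restoredSet.userConfig?.tempFileLimit); // 4096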
PostgresqlConfig14.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfigSet14.$type, PostgresqlConfigSet14); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts index 74573e49..02dee9be 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts @@ -11,6 +11,7 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Redisconfigset50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { Redisconfigset60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; import { Redisconfigset62 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_2"; +import { Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.redis.v1"; @@ -64,6 +65,8 @@ export interface Cluster { tlsEnabled: boolean; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export enum Cluster_Environment { @@ -236,6 +239,44 @@ export function cluster_StatusToJSON(object: Cluster_Status): string { } } +export enum Cluster_PersistenceMode { + /** ON - cluster persistence mode on */ + ON = 0, + /** OFF - cluster persistence mode off */ + OFF = 1, + UNRECOGNIZED = -1, +} + +export function cluster_PersistenceModeFromJSON( + object: any +): Cluster_PersistenceMode { + switch (object) { + case 0: + case "ON": + return Cluster_PersistenceMode.ON; + case 1: + case "OFF": + return Cluster_PersistenceMode.OFF; + case -1: + case "UNRECOGNIZED": + default: + return Cluster_PersistenceMode.UNRECOGNIZED; + } +} + +export function cluster_PersistenceModeToJSON( + object: Cluster_PersistenceMode +): string { + switch (object) { + case Cluster_PersistenceMode.ON: + return "ON"; + case Cluster_PersistenceMode.OFF: + return "OFF"; + default: + return "UNKNOWN"; + } +} + export interface Cluster_LabelsEntry { $type: "yandex.cloud.mdb.redis.v1.Cluster.LabelsEntry"; key: string; @@ -287,7 +328,7 @@ export interface Host { * Name of the Redis host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the Redis cluster. The ID is assigned by MDB at creation time. */ @@ -305,6 +346,14 @@ export interface Host { /** Services provided by the host. */ services: Service[]; shardName: string; + /** + * A replica with a low priority number is considered better for promotion. + * A replica with priority of 0 will never be selected by Redis Sentinel for promotion. + * Works only for non-sharded clusters. 
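// Usage sketch for the persistence-mode enum helpers generated above: FromJSON accepts
// either the numeric value or the string name and falls back to UNRECOGNIZED (-1);
// ToJSON maps known values back to their names. Import path is an assumption.
import {
  Cluster_PersistenceMode,
  cluster_PersistenceModeFromJSON,
  cluster_PersistenceModeToJSON,
} from "./cluster"; // assumed: yandex/cloud/mdb/redis/v1/cluster

cluster_PersistenceModeFromJSON("OFF"); // Cluster_PersistenceMode.OFF
cluster_PersistenceModeFromJSON(0); // Cluster_PersistenceMode.ON
cluster_PersistenceModeFromJSON("something-else"); // Cluster_PersistenceMode.UNRECOGNIZED
cluster_PersistenceModeToJSON(Cluster_PersistenceMode.ON); // "ON"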
Default value is 100. + */ + replicaPriority?: number; + /** Flag showing public IP assignment status to this host. */ + assignPublicIp: boolean; } export enum Host_Role { @@ -530,6 +579,7 @@ const baseCluster: object = { securityGroupIds: "", tlsEnabled: false, deletionProtection: false, + persistenceMode: 0, }; export const Cluster = { @@ -609,6 +659,9 @@ export const Cluster = { if (message.deletionProtection === true) { writer.uint32(144).bool(message.deletionProtection); } + if (message.persistenceMode !== 0) { + writer.uint32(152).int32(message.persistenceMode); + } return writer; }, @@ -687,6 +740,9 @@ export const Cluster = { case 18: message.deletionProtection = reader.bool(); break; + case 19: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -769,6 +825,10 @@ export const Cluster = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -822,6 +882,10 @@ export const Cluster = { message.tlsEnabled !== undefined && (obj.tlsEnabled = message.tlsEnabled); message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -863,6 +927,7 @@ export const Cluster = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? false; message.deletionProtection = object.deletionProtection ?? false; + message.persistenceMode = object.persistenceMode ?? 0; return message; }, }; @@ -1301,6 +1366,7 @@ const baseHost: object = { role: 0, health: 0, shardName: "", + assignPublicIp: false, }; export const Host = { @@ -1334,6 +1400,18 @@ export const Host = { if (message.shardName !== "") { writer.uint32(74).string(message.shardName); } + if (message.replicaPriority !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaPriority!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(88).bool(message.assignPublicIp); + } return writer; }, @@ -1372,6 +1450,15 @@ export const Host = { case 9: message.shardName = reader.string(); break; + case 10: + message.replicaPriority = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.assignPublicIp = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1417,6 +1504,14 @@ export const Host = { object.shardName !== undefined && object.shardName !== null ? String(object.shardName) : ""; + message.replicaPriority = + object.replicaPriority !== undefined && object.replicaPriority !== null + ? Number(object.replicaPriority) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? 
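// Usage sketch for the new Host fields handled above: replicaPriority travels as a
// google.protobuf.Int64Value wrapper (field 10), so leaving it unset keeps it undefined
// and omits it from the wire entirely; assignPublicIp is a plain bool. Import path and
// the priority value are illustrative assumptions.
import { Host } from "./cluster"; // assumed: yandex/cloud/mdb/redis/v1/cluster

const host = Host.fromPartial({ replicaPriority: 50, assignPublicIp: true });
const restoredHost = Host.decode(Host.encode(host).finish());

console.log(restoredHost.replicaPriority); // 50
console.log(Host.fromPartial({}).replicaPriority); // undefined, i.e. "use the default of 100"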
Boolean(object.assignPublicIp) + : false; return message; }, @@ -1441,6 +1536,10 @@ export const Host = { obj.services = []; } message.shardName !== undefined && (obj.shardName = message.shardName); + message.replicaPriority !== undefined && + (obj.replicaPriority = message.replicaPriority); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); return obj; }, @@ -1459,6 +1558,8 @@ export const Host = { message.services = object.services?.map((e) => Service.fromPartial(e)) || []; message.shardName = object.shardName ?? ""; + message.replicaPriority = object.replicaPriority ?? undefined; + message.assignPublicIp = object.assignPublicIp ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts index 14456c8d..2eb6d55e 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts @@ -18,13 +18,16 @@ import { import _m0 from "protobufjs/minimal"; import { Cluster_Environment, + Cluster_PersistenceMode, Resources, Access, Cluster, Host, Shard, cluster_EnvironmentFromJSON, + cluster_PersistenceModeFromJSON, cluster_EnvironmentToJSON, + cluster_PersistenceModeToJSON, } from "../../../../../yandex/cloud/mdb/redis/v1/cluster"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/redis/v1/maintenance"; @@ -35,7 +38,7 @@ import { Backup } from "../../../../../yandex/cloud/mdb/redis/v1/backup"; import { Redisconfig50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { Redisconfig60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; import { Redisconfig62 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_2"; -import { BoolValue } from "../../../../../google/protobuf/wrappers"; +import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.redis.v1"; @@ -118,6 +121,8 @@ export interface CreateClusterRequest { tlsEnabled?: boolean; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export interface CreateClusterRequest_LabelsEntry { @@ -161,6 +166,8 @@ export interface UpdateClusterRequest { securityGroupIds: string[]; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export interface UpdateClusterRequest_LabelsEntry { @@ -232,6 +239,25 @@ export interface MoveClusterMetadata { destinationFolderId: string; } +export interface UpdateClusterHostsRequest { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsRequest"; + /** + * ID of the Redis cluster to update hosts in. + * To get the Redis cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** New configurations to apply to hosts. */ + updateHostSpecs: UpdateHostSpec[]; +} + +export interface UpdateClusterHostsMetadata { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsMetadata"; + /** ID of the Redis cluster to update hosts in. */ + clusterId: string; + /** Names of hosts that are being updated. 
*/ + hostNames: string[]; +} + export interface BackupClusterRequest { $type: "yandex.cloud.mdb.redis.v1.BackupClusterRequest"; /** @@ -280,6 +306,8 @@ export interface RestoreClusterRequest { securityGroupIds: string[]; /** TLS port and functionality on\off */ tlsEnabled?: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export interface RestoreClusterRequest_LabelsEntry { @@ -806,6 +834,25 @@ export interface RebalanceClusterMetadata { clusterId: string; } +export interface UpdateHostSpec { + $type: "yandex.cloud.mdb.redis.v1.UpdateHostSpec"; + /** + * Name of the host to update. + * To get the Redis host name, use a [ClusterService.ListHosts] request. + */ + hostName: string; + /** + * A replica with a low priority number is considered better for promotion. + * A replica with priority of 0 will never be selected by Redis Sentinel for promotion. + * Works only for non-sharded clusters. Default value is 100. + */ + replicaPriority?: number; + /** Whether the host should get a public IP address on update. */ + assignPublicIp: boolean; + /** Field mask that specifies which fields of the Redis host should be updated. */ + updateMask?: FieldMask; +} + export interface HostSpec { $type: "yandex.cloud.mdb.redis.v1.HostSpec"; /** @@ -824,6 +871,20 @@ export interface HostSpec { * To get the shard ID use a [ClusterService.ListShards] request. */ shardName: string; + /** + * A replica with a low priority number is considered better for promotion. + * A replica with priority of 0 will never be selected by Redis Sentinel for promotion. + * Works only for non-sharded clusters. Default value is 100. + */ + replicaPriority?: number; + /** + * Whether the host should get a public IP address on creation. + * + * Possible values: + * * false - don't assign a public IP to the host. + * * true - the host should have a public IP address. + */ + assignPublicIp: boolean; } export interface ConfigSpec { @@ -1100,6 +1161,7 @@ const baseCreateClusterRequest: object = { sharded: false, securityGroupIds: "", deletionProtection: false, + persistenceMode: 0, }; export const CreateClusterRequest = { @@ -1155,6 +1217,9 @@ export const CreateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(112).bool(message.deletionProtection); } + if (message.persistenceMode !== 0) { + writer.uint32(120).int32(message.persistenceMode); + } return writer; }, @@ -1213,6 +1278,9 @@ export const CreateClusterRequest = { case 14: message.deletionProtection = reader.bool(); break; + case 15: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -1272,6 +1340,10 @@ export const CreateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -1310,6 +1382,10 @@ export const CreateClusterRequest = { message.tlsEnabled !== undefined && (obj.tlsEnabled = message.tlsEnabled); message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -1340,6 +1416,7 @@ export const CreateClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? 
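// Usage sketch for the create/restore additions above: persistenceMode is a plain enum
// field (0 = ON), while HostSpec now carries the same optional replicaPriority /
// assignPublicIp pair as Host. Paths, the zone ID and the priority value are illustrative
// assumptions; the remaining required request fields are omitted in this sketch.
import { Cluster_PersistenceMode } from "./cluster"; // assumed paths
import { CreateClusterRequest, HostSpec } from "./cluster_service";

// A host spec like this would go into the request's list of hosts.
const hostSpec = HostSpec.fromPartial({
  zoneId: "ru-central1-a",
  replicaPriority: 0, // per the comment above: never promoted by Sentinel
});

const createRequest = CreateClusterRequest.fromPartial({
  persistenceMode: Cluster_PersistenceMode.OFF,
  deletionProtection: true,
  // ...cluster name, folder, config and host specs omitted in this sketch
});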
undefined; message.deletionProtection = object.deletionProtection ?? false; + message.persistenceMode = object.persistenceMode ?? 0; return message; }, }; @@ -1503,6 +1580,7 @@ const baseUpdateClusterRequest: object = { name: "", securityGroupIds: "", deletionProtection: false, + persistenceMode: 0, }; export const UpdateClusterRequest = { @@ -1549,6 +1627,9 @@ export const UpdateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(72).bool(message.deletionProtection); } + if (message.persistenceMode !== 0) { + writer.uint32(80).int32(message.persistenceMode); + } return writer; }, @@ -1600,6 +1681,9 @@ export const UpdateClusterRequest = { case 9: message.deletionProtection = reader.bool(); break; + case 10: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -1649,6 +1733,10 @@ export const UpdateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -1683,6 +1771,10 @@ export const UpdateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -1716,6 +1808,7 @@ export const UpdateClusterRequest = { : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.persistenceMode = object.persistenceMode ?? 0; return message; }, }; @@ -2421,6 +2514,190 @@ export const MoveClusterMetadata = { messageTypeRegistry.set(MoveClusterMetadata.$type, MoveClusterMetadata); +const baseUpdateClusterHostsRequest: object = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsRequest", + clusterId: "", +}; + +export const UpdateClusterHostsRequest = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsRequest" as const, + + encode( + message: UpdateClusterHostsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.updateHostSpecs) { + UpdateHostSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.updateHostSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.updateHostSpecs.push( + UpdateHostSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.updateHostSpecs = (object.updateHostSpecs ?? 
[]).map((e: any) => + UpdateHostSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateClusterHostsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.updateHostSpecs) { + obj.updateHostSpecs = message.updateHostSpecs.map((e) => + e ? UpdateHostSpec.toJSON(e) : undefined + ); + } else { + obj.updateHostSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = object.clusterId ?? ""; + message.updateHostSpecs = + object.updateHostSpecs?.map((e) => UpdateHostSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsRequest.$type, + UpdateClusterHostsRequest +); + +const baseUpdateClusterHostsMetadata: object = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsMetadata", + clusterId: "", + hostNames: "", +}; + +export const UpdateClusterHostsMetadata = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsMetadata" as const, + + encode( + message: UpdateClusterHostsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.hostNames) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.hostNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.hostNames.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.hostNames = (object.hostNames ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: UpdateClusterHostsMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.hostNames) { + obj.hostNames = message.hostNames.map((e) => e); + } else { + obj.hostNames = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = object.clusterId ?? 
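// Usage sketch for the new UpdateHosts request built above: each UpdateHostSpec names one
// host and a FieldMask of the attributes to change. The cluster ID, host FQDN and mask
// path below are illustrative assumptions.
import { UpdateClusterHostsRequest } from "./cluster_service"; // assumed path

const updateHostsRequest = UpdateClusterHostsRequest.fromPartial({
  clusterId: "example-cluster-id", // hypothetical
  updateHostSpecs: [
    {
      hostName: "example-host.mdb.yandexcloud.net", // hypothetical FQDN
      replicaPriority: 10,
      updateMask: { paths: ["replica_priority"] }, // path string assumed
    },
  ],
});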
""; + message.hostNames = object.hostNames?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsMetadata.$type, + UpdateClusterHostsMetadata +); + const baseBackupClusterRequest: object = { $type: "yandex.cloud.mdb.redis.v1.BackupClusterRequest", clusterId: "", @@ -2560,6 +2837,7 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + persistenceMode: 0, }; export const RestoreClusterRequest = { @@ -2612,6 +2890,9 @@ export const RestoreClusterRequest = { writer.uint32(90).fork() ).ldelim(); } + if (message.persistenceMode !== 0) { + writer.uint32(96).int32(message.persistenceMode); + } return writer; }, @@ -2667,6 +2948,9 @@ export const RestoreClusterRequest = { case 11: message.tlsEnabled = BoolValue.decode(reader, reader.uint32()).value; break; + case 12: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -2721,6 +3005,10 @@ export const RestoreClusterRequest = { object.tlsEnabled !== undefined && object.tlsEnabled !== null ? Boolean(object.tlsEnabled) : undefined; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -2757,6 +3045,10 @@ export const RestoreClusterRequest = { obj.securityGroupIds = []; } message.tlsEnabled !== undefined && (obj.tlsEnabled = message.tlsEnabled); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -2786,6 +3078,7 @@ export const RestoreClusterRequest = { message.folderId = object.folderId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? undefined; + message.persistenceMode = object.persistenceMode ?? 0; return message; }, }; @@ -5701,11 +5994,128 @@ messageTypeRegistry.set( RebalanceClusterMetadata ); +const baseUpdateHostSpec: object = { + $type: "yandex.cloud.mdb.redis.v1.UpdateHostSpec", + hostName: "", + assignPublicIp: false, +}; + +export const UpdateHostSpec = { + $type: "yandex.cloud.mdb.redis.v1.UpdateHostSpec" as const, + + encode( + message: UpdateHostSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hostName !== "") { + writer.uint32(10).string(message.hostName); + } + if (message.replicaPriority !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaPriority!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(24).bool(message.assignPublicIp); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateHostSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hostName = reader.string(); + break; + case 2: + message.replicaPriority = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.assignPublicIp = reader.bool(); + break; + case 4: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = + object.hostName !== undefined && object.hostName !== null + ? String(object.hostName) + : ""; + message.replicaPriority = + object.replicaPriority !== undefined && object.replicaPriority !== null + ? Number(object.replicaPriority) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + return message; + }, + + toJSON(message: UpdateHostSpec): unknown { + const obj: any = {}; + message.hostName !== undefined && (obj.hostName = message.hostName); + message.replicaPriority !== undefined && + (obj.replicaPriority = message.replicaPriority); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = object.hostName ?? ""; + message.replicaPriority = object.replicaPriority ?? undefined; + message.assignPublicIp = object.assignPublicIp ?? false; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateHostSpec.$type, UpdateHostSpec); + const baseHostSpec: object = { $type: "yandex.cloud.mdb.redis.v1.HostSpec", zoneId: "", subnetId: "", shardName: "", + assignPublicIp: false, }; export const HostSpec = { @@ -5724,6 +6134,18 @@ export const HostSpec = { if (message.shardName !== "") { writer.uint32(26).string(message.shardName); } + if (message.replicaPriority !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaPriority!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(40).bool(message.assignPublicIp); + } return writer; }, @@ -5743,6 +6165,15 @@ export const HostSpec = { case 3: message.shardName = reader.string(); break; + case 4: + message.replicaPriority = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.assignPublicIp = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -5765,6 +6196,14 @@ export const HostSpec = { object.shardName !== undefined && object.shardName !== null ? String(object.shardName) : ""; + message.replicaPriority = + object.replicaPriority !== undefined && object.replicaPriority !== null + ? 
Number(object.replicaPriority) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; return message; }, @@ -5773,6 +6212,10 @@ export const HostSpec = { message.zoneId !== undefined && (obj.zoneId = message.zoneId); message.subnetId !== undefined && (obj.subnetId = message.subnetId); message.shardName !== undefined && (obj.shardName = message.shardName); + message.replicaPriority !== undefined && + (obj.replicaPriority = message.replicaPriority); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); return obj; }, @@ -5781,6 +6224,8 @@ export const HostSpec = { message.zoneId = object.zoneId ?? ""; message.subnetId = object.subnetId ?? ""; message.shardName = object.shardName ?? ""; + message.replicaPriority = object.replicaPriority ?? undefined; + message.assignPublicIp = object.assignPublicIp ?? false; return message; }, }; @@ -6217,6 +6662,19 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates the specified hosts. */ + updateHosts: { + path: "/yandex.cloud.mdb.redis.v1.ClusterService/UpdateHosts", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterHostsRequest) => + Buffer.from(UpdateClusterHostsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateClusterHostsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Returns the specified shard. */ getShard: { path: "/yandex.cloud.mdb.redis.v1.ClusterService/GetShard", @@ -6341,6 +6799,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { addHosts: handleUnaryCall; /** Deletes the specified hosts for a cluster. */ deleteHosts: handleUnaryCall; + /** Updates the specified hosts. */ + updateHosts: handleUnaryCall; /** Returns the specified shard. */ getShard: handleUnaryCall; /** Retrieves a list of shards. */ @@ -6707,6 +7167,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates the specified hosts. */ + updateHosts( + request: UpdateClusterHostsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Returns the specified shard. */ getShard( request: GetClusterShardRequest, diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts index e6f6dda6..f95c9aa0 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts @@ -35,6 +35,10 @@ export interface Redisconfig50 { slowlogMaxLen?: number; /** String setting for pub\sub functionality; subset of KEg$lshzxeAt. */ notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. 
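// Usage sketch: the generated service definition above exposes UpdateHosts as a
// callback-style unary call that returns an Operation. How the client instance is
// constructed (session, endpoint, credentials) is SDK wiring outside this file and is
// taken as a given here; import paths are assumed from the repository layout.
import { ClusterServiceClient, UpdateClusterHostsRequest } from "./cluster_service";
import { Operation } from "../../../../../yandex/cloud/operation/operation"; // assumed path

// Small promise wrapper over the callback-based unary call shown above.
function updateHosts(
  client: ClusterServiceClient,
  request: UpdateClusterHostsRequest
): Promise<Operation> {
  return new Promise((resolve, reject) => {
    client.updateHosts(request, (err, operation) =>
      err ? reject(err) : resolve(operation)
    );
  });
}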
*/ + clientOutputBufferLimitPubsub?: Redisconfig50_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig50_ClientOutputBufferLimit; } export enum Redisconfig50_MaxmemoryPolicy { @@ -129,6 +133,16 @@ export function redisconfig50_MaxmemoryPolicyToJSON( } } +export interface Redisconfig50_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig5_0.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. */ + softSeconds?: number; +} + export interface Redisconfigset50 { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet5_0"; /** @@ -192,6 +206,18 @@ export const Redisconfig50 = { if (message.notifyKeyspaceEvents !== "") { writer.uint32(58).string(message.notifyKeyspaceEvents); } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig50_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig50_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -229,6 +255,20 @@ export const Redisconfig50 = { case 7: message.notifyKeyspaceEvents = reader.string(); break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig50_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig50_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -269,6 +309,20 @@ export const Redisconfig50 = { object.notifyKeyspaceEvents !== null ? String(object.notifyKeyspaceEvents) : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig50_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? Redisconfig50_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, @@ -287,6 +341,18 @@ export const Redisconfig50 = { (obj.slowlogMaxLen = message.slowlogMaxLen); message.notifyKeyspaceEvents !== undefined && (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig50_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig50_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); return obj; }, @@ -301,12 +367,138 @@ export const Redisconfig50 = { message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? 
Redisconfig50_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? Redisconfig50_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, }; messageTypeRegistry.set(Redisconfig50.$type, Redisconfig50); +const baseRedisconfig50_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig5_0.ClientOutputBufferLimit", +}; + +export const Redisconfig50_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig5_0.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig50_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig50_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRedisconfig50_ClientOutputBufferLimit, + } as Redisconfig50_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig50_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig50_ClientOutputBufferLimit, + } as Redisconfig50_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig50_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig50_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig50_ClientOutputBufferLimit, + } as Redisconfig50_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
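// Usage sketch for the client-output-buffer-limit settings handled above. The same
// optional sub-message (hardLimit / softLimit / softSeconds, all Int64Value wrappers)
// is added to the 5.0, 6.0 and 6.2 configs; the byte values below are illustrative only.
import { Redisconfig50 } from "./redis5_0"; // assumed: yandex/cloud/mdb/redis/v1/config/redis5_0

const redisConfig = Redisconfig50.fromPartial({
  clientOutputBufferLimitNormal: {
    hardLimit: 33554432, // e.g. 32 MiB hard cap
    softLimit: 16777216, // e.g. 16 MiB ...
    softSeconds: 60, // ...sustained for 60 seconds
  },
});

const restoredConfig = Redisconfig50.decode(Redisconfig50.encode(redisConfig).finish());
console.log(restoredConfig.clientOutputBufferLimitNormal?.softSeconds); // 60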
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig50_ClientOutputBufferLimit.$type, + Redisconfig50_ClientOutputBufferLimit +); + const baseRedisconfigset50: object = { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet5_0", }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts index e397847f..fbb5b756 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts @@ -35,6 +35,10 @@ export interface Redisconfig60 { slowlogMaxLen?: number; /** String setting for pub\sub functionality; subset of KEg$lshzxeAtm. */ notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. */ + clientOutputBufferLimitPubsub?: Redisconfig60_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig60_ClientOutputBufferLimit; } export enum Redisconfig60_MaxmemoryPolicy { @@ -129,6 +133,16 @@ export function redisconfig60_MaxmemoryPolicyToJSON( } } +export interface Redisconfig60_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig6_0.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. */ + softSeconds?: number; +} + export interface Redisconfigset60 { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_0"; /** @@ -192,6 +206,18 @@ export const Redisconfig60 = { if (message.notifyKeyspaceEvents !== "") { writer.uint32(58).string(message.notifyKeyspaceEvents); } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig60_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig60_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -229,6 +255,20 @@ export const Redisconfig60 = { case 7: message.notifyKeyspaceEvents = reader.string(); break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig60_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig60_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -269,6 +309,20 @@ export const Redisconfig60 = { object.notifyKeyspaceEvents !== null ? String(object.notifyKeyspaceEvents) : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig60_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig60_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, @@ -287,6 +341,18 @@ export const Redisconfig60 = { (obj.slowlogMaxLen = message.slowlogMaxLen); message.notifyKeyspaceEvents !== undefined && (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig60_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig60_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); return obj; }, @@ -301,12 +367,138 @@ export const Redisconfig60 = { message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig60_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? Redisconfig60_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, }; messageTypeRegistry.set(Redisconfig60.$type, Redisconfig60); +const baseRedisconfig60_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_0.ClientOutputBufferLimit", +}; + +export const Redisconfig60_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_0.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig60_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig60_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRedisconfig60_ClientOutputBufferLimit, + } as Redisconfig60_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig60_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig60_ClientOutputBufferLimit, + } as Redisconfig60_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig60_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig60_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig60_ClientOutputBufferLimit, + } as Redisconfig60_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig60_ClientOutputBufferLimit.$type, + Redisconfig60_ClientOutputBufferLimit +); + const baseRedisconfigset60: object = { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_0", }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts index 53648a05..3cbf65e9 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts @@ -35,6 +35,10 @@ export interface Redisconfig62 { slowlogMaxLen?: number; /** String setting for pub\sub functionality; subset of KEg$lshzxeAtm. */ notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. */ + clientOutputBufferLimitPubsub?: Redisconfig62_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig62_ClientOutputBufferLimit; } export enum Redisconfig62_MaxmemoryPolicy { @@ -129,6 +133,16 @@ export function redisconfig62_MaxmemoryPolicyToJSON( } } +export interface Redisconfig62_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig6_2.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. 
*/ + softSeconds?: number; +} + export interface Redisconfigset62 { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_2"; /** @@ -192,6 +206,18 @@ export const Redisconfig62 = { if (message.notifyKeyspaceEvents !== "") { writer.uint32(58).string(message.notifyKeyspaceEvents); } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig62_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig62_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -229,6 +255,20 @@ export const Redisconfig62 = { case 7: message.notifyKeyspaceEvents = reader.string(); break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig62_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig62_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -269,6 +309,20 @@ export const Redisconfig62 = { object.notifyKeyspaceEvents !== null ? String(object.notifyKeyspaceEvents) : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig62_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? Redisconfig62_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, @@ -287,6 +341,18 @@ export const Redisconfig62 = { (obj.slowlogMaxLen = message.slowlogMaxLen); message.notifyKeyspaceEvents !== undefined && (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig62_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig62_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); return obj; }, @@ -301,12 +367,138 @@ export const Redisconfig62 = { message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig62_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig62_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, }; messageTypeRegistry.set(Redisconfig62.$type, Redisconfig62); +const baseRedisconfig62_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_2.ClientOutputBufferLimit", +}; + +export const Redisconfig62_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_2.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig62_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig62_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRedisconfig62_ClientOutputBufferLimit, + } as Redisconfig62_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig62_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig62_ClientOutputBufferLimit, + } as Redisconfig62_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig62_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig62_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig62_ClientOutputBufferLimit, + } as Redisconfig62_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig62_ClientOutputBufferLimit.$type, + Redisconfig62_ClientOutputBufferLimit +); + const baseRedisconfigset62: object = { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_2", }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts index 70a41b1c..e0bae4e4 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts @@ -56,6 +56,8 @@ export interface Cluster { deletionProtection: boolean; /** SQL Server Collation */ sqlcollation: string; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export enum Cluster_Environment { @@ -266,7 +268,7 @@ export interface Host { * Name of the SQL Server host. The host name is assigned by Managed Service for SQL Server * at creation time, and cannot be changed. 1-63 characters long. * - * The name is unique across all existing database hosts in Yandex.Cloud, + * The name is unique across all existing database hosts in Yandex Cloud, * as it defines the FQDN of the host. */ name: string; @@ -486,6 +488,8 @@ export interface Access { $type: "yandex.cloud.mdb.sqlserver.v1.Access"; /** Allow access for DataLens */ dataLens: boolean; + /** Allow access for Web SQL. */ + webSql: boolean; } const baseCluster: object = { @@ -501,6 +505,7 @@ const baseCluster: object = { securityGroupIds: "", deletionProtection: false, sqlcollation: "", + hostGroupIds: "", }; export const Cluster = { @@ -565,6 +570,9 @@ export const Cluster = { if (message.sqlcollation !== "") { writer.uint32(122).string(message.sqlcollation); } + for (const v of message.hostGroupIds) { + writer.uint32(130).string(v!); + } return writer; }, @@ -575,6 +583,7 @@ export const Cluster = { message.labels = {}; message.monitoring = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -628,6 +637,9 @@ export const Cluster = { case 15: message.sqlcollation = reader.string(); break; + case 16: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -697,6 +709,9 @@ export const Cluster = { object.sqlcollation !== undefined && object.sqlcollation !== null ? String(object.sqlcollation) : ""; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -742,6 +757,11 @@ export const Cluster = { (obj.deletionProtection = message.deletionProtection); message.sqlcollation !== undefined && (obj.sqlcollation = message.sqlcollation); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -773,6 +793,7 @@ export const Cluster = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; message.sqlcollation = object.sqlcollation ?? 
""; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; @@ -1457,6 +1478,7 @@ messageTypeRegistry.set(Resources.$type, Resources); const baseAccess: object = { $type: "yandex.cloud.mdb.sqlserver.v1.Access", dataLens: false, + webSql: false, }; export const Access = { @@ -1469,6 +1491,9 @@ export const Access = { if (message.dataLens === true) { writer.uint32(8).bool(message.dataLens); } + if (message.webSql === true) { + writer.uint32(16).bool(message.webSql); + } return writer; }, @@ -1482,6 +1507,9 @@ export const Access = { case 1: message.dataLens = reader.bool(); break; + case 2: + message.webSql = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1496,18 +1524,24 @@ export const Access = { object.dataLens !== undefined && object.dataLens !== null ? Boolean(object.dataLens) : false; + message.webSql = + object.webSql !== undefined && object.webSql !== null + ? Boolean(object.webSql) + : false; return message; }, toJSON(message: Access): unknown { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); + message.webSql !== undefined && (obj.webSql = message.webSql); return obj; }, fromPartial, I>>(object: I): Access { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; + message.webSql = object.webSql ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts index a5a8f2d2..ff912a4c 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts @@ -126,6 +126,8 @@ export interface CreateClusterRequest { deletionProtection: boolean; /** name of SQL Collation that cluster will be created with */ sqlcollation: string; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export interface CreateClusterRequest_LabelsEntry { @@ -250,6 +252,10 @@ export interface RestoreClusterRequest { folderId: string; /** User security groups */ securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export interface RestoreClusterRequest_LabelsEntry { @@ -858,6 +864,7 @@ const baseCreateClusterRequest: object = { securityGroupIds: "", deletionProtection: false, sqlcollation: "", + hostGroupIds: "", }; export const CreateClusterRequest = { @@ -914,6 +921,9 @@ export const CreateClusterRequest = { if (message.sqlcollation !== "") { writer.uint32(106).string(message.sqlcollation); } + for (const v of message.hostGroupIds) { + writer.uint32(114).string(v!); + } return writer; }, @@ -929,6 +939,7 @@ export const CreateClusterRequest = { message.userSpecs = []; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -979,6 +990,9 @@ export const CreateClusterRequest = { case 13: message.sqlcollation = reader.string(); break; + case 14: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -1040,6 +1054,9 @@ export const CreateClusterRequest = { object.sqlcollation !== undefined && object.sqlcollation !== null ? String(object.sqlcollation) : ""; + message.hostGroupIds = (object.hostGroupIds ?? 
[]).map((e: any) => + String(e) + ); return message; }, @@ -1092,6 +1109,11 @@ export const CreateClusterRequest = { (obj.deletionProtection = message.deletionProtection); message.sqlcollation !== undefined && (obj.sqlcollation = message.sqlcollation); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -1125,6 +1147,7 @@ export const CreateClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; message.sqlcollation = object.sqlcollation ?? ""; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; @@ -1903,6 +1926,8 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + deletionProtection: false, + hostGroupIds: "", }; export const RestoreClusterRequest = { @@ -1956,6 +1981,12 @@ export const RestoreClusterRequest = { for (const v of message.securityGroupIds) { writer.uint32(98).string(v!); } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } + for (const v of message.hostGroupIds) { + writer.uint32(114).string(v!); + } return writer; }, @@ -1969,6 +2000,7 @@ export const RestoreClusterRequest = { message.labels = {}; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2013,6 +2045,12 @@ export const RestoreClusterRequest = { case 12: message.securityGroupIds.push(reader.string()); break; + case 13: + message.deletionProtection = reader.bool(); + break; + case 14: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -2067,6 +2105,14 @@ export const RestoreClusterRequest = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -2103,6 +2149,13 @@ export const RestoreClusterRequest = { } else { obj.securityGroupIds = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -2132,6 +2185,8 @@ export const RestoreClusterRequest = { message.networkId = object.networkId ?? ""; message.folderId = object.folderId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/operation/operation_service.ts b/src/generated/yandex/cloud/operation/operation_service.ts index 8f858fe0..18ca3a71 100644 --- a/src/generated/yandex/cloud/operation/operation_service.ts +++ b/src/generated/yandex/cloud/operation/operation_service.ts @@ -173,7 +173,11 @@ export const OperationServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Cancels the specified operation. */ + /** + * Cancels the specified operation. 
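Taken together, the SQL Server hunks above add hostGroupIds to Cluster, CreateClusterRequest and RestoreClusterRequest, add deletionProtection to RestoreClusterRequest, and extend Access with a webSql flag. A rough usage sketch — IDs and values are placeholders, and the import paths simply mirror the src/generated files touched by this patch:

import { Access } from "./src/generated/yandex/cloud/mdb/sqlserver/v1/cluster";
import {
  CreateClusterRequest,
  RestoreClusterRequest,
} from "./src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service";

// Create a cluster whose hosts are placed on dedicated host groups (placeholder IDs).
const createRequest = CreateClusterRequest.fromPartial({
  folderId: "b1gxxxxxxxxxxxxxxxxx",
  // ...environment, config, host and user specs omitted for brevity...
  hostGroupIds: ["fdvxxxxxxxxxxxxxxxxx"],
  deletionProtection: true,
  sqlcollation: "SQL_Latin1_General_CP1_CI_AS",
});

// Restoring a backup can now keep deletion protection and host-group placement.
const restoreRequest = RestoreClusterRequest.fromPartial({
  folderId: "b1gxxxxxxxxxxxxxxxxx",
  // ...backup reference and host specs omitted for brevity...
  hostGroupIds: ["fdvxxxxxxxxxxxxxxxxx"],
  deletionProtection: true,
});

// Access settings now cover Web SQL in addition to DataLens.
const access = Access.fromPartial({ dataLens: true, webSql: true });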
+ * + * Note that currently Yandex Object Storage API does not support cancelling operations. + */ cancel: { path: "/yandex.cloud.operation.OperationService/Cancel", requestStream: false, @@ -190,7 +194,11 @@ export const OperationServiceService = { export interface OperationServiceServer extends UntypedServiceImplementation { /** Returns the specified Operation resource. */ get: handleUnaryCall; - /** Cancels the specified operation. */ + /** + * Cancels the specified operation. + * + * Note that currently Yandex Object Storage API does not support cancelling operations. + */ cancel: handleUnaryCall; } @@ -211,7 +219,11 @@ export interface OperationServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Cancels the specified operation. */ + /** + * Cancels the specified operation. + * + * Note that currently Yandex Object Storage API does not support cancelling operations. + */ cancel( request: CancelOperationRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts b/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts index ca1faeac..b205afd8 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts @@ -5,7 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.organizationmanager.v1"; -/** Currently represents only [Yandex.Passport account](/docs/iam/concepts/#passport). */ +/** Currently represents only [Yandex account](/docs/iam/concepts/#passport). */ export interface UserAccount { $type: "yandex.cloud.organizationmanager.v1.UserAccount"; /** ID of the user account. */ @@ -18,13 +18,13 @@ export interface UserAccount { /** * A YandexPassportUserAccount resource. - * For more information, see [Yandex.Passport account](/docs/iam/concepts/#passport). + * For more information, see [Yandex account](/docs/iam/concepts/#passport). */ export interface YandexPassportUserAccount { $type: "yandex.cloud.organizationmanager.v1.YandexPassportUserAccount"; - /** Login of the Yandex.Passport user account. */ + /** Login of the Yandex user account. */ login: string; - /** Default email of the Yandex.Passport user account. */ + /** Default email of the Yandex user account. */ defaultEmail: string; } diff --git a/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts index 3a8d2016..0bd8bd79 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts @@ -53,7 +53,7 @@ export interface ListMembersResponse { export interface ListMembersResponse_OrganizationUser { $type: "yandex.cloud.organizationmanager.v1.ListMembersResponse.OrganizationUser"; - /** OpenID standard claims with additional Yandex.Organization claims. */ + /** OpenID standard claims with additional Yandex Cloud Organization claims. 
*/ subjectClaims?: SubjectClaims; } diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts index 68ab2d15..ad0d2381 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts @@ -28,6 +28,8 @@ export interface ApiGateway { logGroupId: string; /** List of domains attached to API gateway. */ attachedDomains: AttachedDomain[]; + /** Network access. If specified the gateway will be attached to specified network/subnet(s). */ + connectivity?: Connectivity; } export enum ApiGateway_Status { @@ -109,6 +111,21 @@ export interface AttachedDomain { domain: string; } +/** Gateway connectivity specification. */ +export interface Connectivity { + $type: "yandex.cloud.serverless.apigateway.v1.Connectivity"; + /** + * Network the gateway will have access to. + * It's essential to specify network with subnets in all availability zones. + */ + networkId: string; + /** + * Complete list of subnets (from the same network) the gateway can be attached to. + * It's essential to specify at least one subnet for each availability zones. + */ + subnetId: string[]; +} + const baseApiGateway: object = { $type: "yandex.cloud.serverless.apigateway.v1.ApiGateway", id: "", @@ -167,6 +184,12 @@ export const ApiGateway = { for (const v of message.attachedDomains) { AttachedDomain.encode(v!, writer.uint32(90).fork()).ldelim(); } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -216,6 +239,9 @@ export const ApiGateway = { AttachedDomain.decode(reader, reader.uint32()) ); break; + case 12: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -265,6 +291,10 @@ export const ApiGateway = { message.attachedDomains = (object.attachedDomains ?? []).map((e: any) => AttachedDomain.fromJSON(e) ); + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -294,6 +324,10 @@ export const ApiGateway = { } else { obj.attachedDomains = []; } + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -319,6 +353,10 @@ export const ApiGateway = { message.logGroupId = object.logGroupId ?? ""; message.attachedDomains = object.attachedDomains?.map((e) => AttachedDomain.fromPartial(e)) || []; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; @@ -504,6 +542,83 @@ export const AttachedDomain = { messageTypeRegistry.set(AttachedDomain.$type, AttachedDomain); +const baseConnectivity: object = { + $type: "yandex.cloud.serverless.apigateway.v1.Connectivity", + networkId: "", + subnetId: "", +}; + +export const Connectivity = { + $type: "yandex.cloud.serverless.apigateway.v1.Connectivity" as const, + + encode( + message: Connectivity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.networkId !== "") { + writer.uint32(10).string(message.networkId); + } + for (const v of message.subnetId) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Connectivity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseConnectivity } as Connectivity; + message.subnetId = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.networkId = reader.string(); + break; + case 2: + message.subnetId.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.subnetId = (object.subnetId ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: Connectivity): unknown { + const obj: any = {}; + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.subnetId) { + obj.subnetId = message.subnetId.map((e) => e); + } else { + obj.subnetId = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = object.networkId ?? ""; + message.subnetId = object.subnetId?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(Connectivity.$type, Connectivity); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts index 03f29fd1..8692333c 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts @@ -14,8 +14,11 @@ import { ServiceError, } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; +import { + Connectivity, + ApiGateway, +} from "../../../../../yandex/cloud/serverless/apigateway/v1/apigateway"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; -import { ApiGateway } from "../../../../../yandex/cloud/serverless/apigateway/v1/apigateway"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { ListAccessBindingsRequest, @@ -102,6 +105,8 @@ export interface CreateApiGatewayRequest { labels: { [key: string]: string }; /** The text of specification, JSON or YAML. */ openapiSpec: string | undefined; + /** Gateway connectivity. If specified the gateway will be attached to specified network/subnet(s). 
*/ + connectivity?: Connectivity; } export interface CreateApiGatewayRequest_LabelsEntry { @@ -136,6 +141,8 @@ export interface UpdateApiGatewayRequest { labels: { [key: string]: string }; /** The text of specification, JSON or YAML. */ openapiSpec: string | undefined; + /** Gateway connectivity. If specified the gateway will be attached to specified network/subnet(s). */ + connectivity?: Connectivity; } export interface UpdateApiGatewayRequest_LabelsEntry { @@ -600,6 +607,12 @@ export const CreateApiGatewayRequest = { if (message.openapiSpec !== undefined) { writer.uint32(42).string(message.openapiSpec); } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(50).fork() + ).ldelim(); + } return writer; }, @@ -637,6 +650,9 @@ export const CreateApiGatewayRequest = { case 5: message.openapiSpec = reader.string(); break; + case 6: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -671,6 +687,10 @@ export const CreateApiGatewayRequest = { object.openapiSpec !== undefined && object.openapiSpec !== null ? String(object.openapiSpec) : undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -688,6 +708,10 @@ export const CreateApiGatewayRequest = { } message.openapiSpec !== undefined && (obj.openapiSpec = message.openapiSpec); + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -709,6 +733,10 @@ export const CreateApiGatewayRequest = { return acc; }, {}); message.openapiSpec = object.openapiSpec ?? undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; @@ -843,6 +871,12 @@ export const UpdateApiGatewayRequest = { if (message.openapiSpec !== undefined) { writer.uint32(50).string(message.openapiSpec); } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -883,6 +917,9 @@ export const UpdateApiGatewayRequest = { case 6: message.openapiSpec = reader.string(); break; + case 7: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -921,6 +958,10 @@ export const UpdateApiGatewayRequest = { object.openapiSpec !== undefined && object.openapiSpec !== null ? String(object.openapiSpec) : undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -943,6 +984,10 @@ export const UpdateApiGatewayRequest = { } message.openapiSpec !== undefined && (obj.openapiSpec = message.openapiSpec); + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -968,6 +1013,10 @@ export const UpdateApiGatewayRequest = { return acc; }, {}); message.openapiSpec = object.openapiSpec ?? undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
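The Connectivity message introduced above attaches an API gateway to a VPC: networkId picks the network, and subnetId should list a subnet in each availability zone, as its field comments note. A hedged sketch of passing it in CreateApiGatewayRequest — all IDs and the spec text are placeholders, and the import path mirrors the src/generated file in this patch:

import { CreateApiGatewayRequest } from "./src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service";

const createGatewayRequest = CreateApiGatewayRequest.fromPartial({
  folderId: "b1gxxxxxxxxxxxxxxxxx",        // placeholder folder ID
  name: "my-gateway",
  openapiSpec: "...",                      // the OpenAPI document text (JSON or YAML)
  connectivity: {
    networkId: "enpxxxxxxxxxxxxxxxxx",     // placeholder network ID
    // one subnet per availability zone, as recommended by the field comment
    subnetId: [
      "e9bxxxxxxxxxxxxxxxxx",
      "e2lxxxxxxxxxxxxxxxxx",
      "b0cxxxxxxxxxxxxxxxxx",
    ],
  },
});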
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container.ts b/src/generated/yandex/cloud/serverless/containers/v1/container.ts index 34b754db..b1c7d6d6 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container.ts @@ -87,6 +87,8 @@ export interface Revision { concurrency: number; serviceAccountId: string; status: Revision_Status; + secrets: Secret[]; + connectivity?: Connectivity; } export enum Revision_Status { @@ -166,6 +168,20 @@ export interface Resources { coreFraction: number; } +export interface Secret { + $type: "yandex.cloud.serverless.containers.v1.Secret"; + id: string; + versionId: string; + key: string; + environmentVariable: string | undefined; +} + +export interface Connectivity { + $type: "yandex.cloud.serverless.containers.v1.Connectivity"; + networkId: string; + subnetIds: string[]; +} + const baseContainer: object = { $type: "yandex.cloud.serverless.containers.v1.Container", id: "", @@ -475,6 +491,15 @@ export const Revision = { if (message.status !== 0) { writer.uint32(80).int32(message.status); } + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(90).fork()).ldelim(); + } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -482,6 +507,7 @@ export const Revision = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseRevision } as Revision; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -517,6 +543,12 @@ export const Revision = { case 10: message.status = reader.int32() as any; break; + case 11: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; + case 12: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -565,6 +597,13 @@ export const Revision = { object.status !== undefined && object.status !== null ? revision_StatusFromJSON(object.status) : 0; + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -593,6 +632,17 @@ export const Revision = { (obj.serviceAccountId = message.serviceAccountId); message.status !== undefined && (obj.status = revision_StatusToJSON(message.status)); + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -617,6 +667,11 @@ export const Revision = { message.concurrency = object.concurrency ?? 0; message.serviceAccountId = object.serviceAccountId ?? ""; message.status = object.status ?? 0; + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; @@ -1065,6 +1120,179 @@ export const Resources = { messageTypeRegistry.set(Resources.$type, Resources); +const baseSecret: object = { + $type: "yandex.cloud.serverless.containers.v1.Secret", + id: "", + versionId: "", + key: "", +}; + +export const Secret = { + $type: "yandex.cloud.serverless.containers.v1.Secret" as const, + + encode( + message: Secret, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.versionId !== "") { + writer.uint32(18).string(message.versionId); + } + if (message.key !== "") { + writer.uint32(26).string(message.key); + } + if (message.environmentVariable !== undefined) { + writer.uint32(34).string(message.environmentVariable); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Secret { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSecret } as Secret; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.versionId = reader.string(); + break; + case 3: + message.key = reader.string(); + break; + case 4: + message.environmentVariable = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Secret { + const message = { ...baseSecret } as Secret; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.versionId = + object.versionId !== undefined && object.versionId !== null + ? String(object.versionId) + : ""; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.environmentVariable = + object.environmentVariable !== undefined && + object.environmentVariable !== null + ? String(object.environmentVariable) + : undefined; + return message; + }, + + toJSON(message: Secret): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.versionId !== undefined && (obj.versionId = message.versionId); + message.key !== undefined && (obj.key = message.key); + message.environmentVariable !== undefined && + (obj.environmentVariable = message.environmentVariable); + return obj; + }, + + fromPartial, I>>(object: I): Secret { + const message = { ...baseSecret } as Secret; + message.id = object.id ?? ""; + message.versionId = object.versionId ?? ""; + message.key = object.key ?? ""; + message.environmentVariable = object.environmentVariable ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Secret.$type, Secret); + +const baseConnectivity: object = { + $type: "yandex.cloud.serverless.containers.v1.Connectivity", + networkId: "", + subnetIds: "", +}; + +export const Connectivity = { + $type: "yandex.cloud.serverless.containers.v1.Connectivity" as const, + + encode( + message: Connectivity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.networkId !== "") { + writer.uint32(10).string(message.networkId); + } + for (const v of message.subnetIds) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Connectivity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConnectivity } as Connectivity; + message.subnetIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.networkId = reader.string(); + break; + case 2: + message.subnetIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.subnetIds = (object.subnetIds ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: Connectivity): unknown { + const obj: any = {}; + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.subnetIds) { + obj.subnetIds = message.subnetIds.map((e) => e); + } else { + obj.subnetIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = object.networkId ?? ""; + message.subnetIds = object.subnetIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(Connectivity.$type, Connectivity); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts index 183c1994..d4c7972c 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts @@ -17,10 +17,12 @@ import _m0 from "protobufjs/minimal"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Resources, + Connectivity, Command, Args, Container, Revision, + Secret, } from "../../../../../yandex/cloud/serverless/containers/v1/container"; import { Duration } from "../../../../../google/protobuf/duration"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; @@ -130,6 +132,8 @@ export interface DeployContainerRevisionRequest { serviceAccountId: string; imageSpec?: ImageSpec; concurrency: number; + secrets: Secret[]; + connectivity?: Connectivity; } export interface ImageSpec { @@ -1507,6 +1511,15 @@ export const DeployContainerRevisionRequest = { if (message.concurrency !== 0) { writer.uint32(72).int64(message.concurrency); } + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(82).fork()).ldelim(); + } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -1519,6 +1532,7 @@ export const DeployContainerRevisionRequest = { const message = { ...baseDeployContainerRevisionRequest, } as DeployContainerRevisionRequest; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1543,6 +1557,12 @@ export const DeployContainerRevisionRequest = { case 9: message.concurrency = longToNumber(reader.int64() as Long); break; + case 10: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; + case 11: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1583,6 +1603,13 @@ export const DeployContainerRevisionRequest = { object.concurrency !== undefined 
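With the container hunks above, a Revision — and the DeployContainerRevisionRequest that produces it — can carry Lockbox Secret entries and a VPC Connectivity block. A sketch of a deploy request focused on just the new fields (everything else is elided; IDs are placeholders and the import path mirrors src/generated):

import { DeployContainerRevisionRequest } from "./src/generated/yandex/cloud/serverless/containers/v1/container_service";

const deployRequest = DeployContainerRevisionRequest.fromPartial({
  // ...container ID, image spec and the other deployment fields go here...
  // Deliver one entry of a Lockbox secret version into the container environment.
  secrets: [
    {
      id: "e6qxxxxxxxxxxxxxxxxx",          // Lockbox secret ID (placeholder)
      versionId: "e6qyyyyyyyyyyyyyyyyy",   // secret version ID (placeholder)
      key: "db-password",                  // key inside the secret payload
      environmentVariable: "DB_PASSWORD",  // environment variable to populate
    },
  ],
  // Attach the revision to a VPC network and subnet(s).
  connectivity: {
    networkId: "enpxxxxxxxxxxxxxxxxx",
    subnetIds: ["e9bxxxxxxxxxxxxxxxxx"],
  },
});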
&& object.concurrency !== null ? Number(object.concurrency) : 0; + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -1608,6 +1635,17 @@ export const DeployContainerRevisionRequest = { : undefined); message.concurrency !== undefined && (obj.concurrency = Math.round(message.concurrency)); + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -1633,6 +1671,11 @@ export const DeployContainerRevisionRequest = { ? ImageSpec.fromPartial(object.imageSpec) : undefined; message.concurrency = object.concurrency ?? 0; + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function.ts b/src/generated/yandex/cloud/serverless/functions/v1/function.ts index f511b127..3c0bf2b6 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function.ts @@ -137,6 +137,8 @@ export interface Version { connectivity?: Connectivity; /** Additional service accounts to be used by the version. */ namedServiceAccounts: { [key: string]: string }; + /** Lockbox secrets to be used by the version */ + secrets: Secret[]; } export enum Version_Status { @@ -251,6 +253,19 @@ export interface ScalingPolicy { zoneRequestsLimit: number; } +/** Secret for serverless function */ +export interface Secret { + $type: "yandex.cloud.serverless.functions.v1.Secret"; + /** ID of lockbox secret */ + id: string; + /** ID of secret version */ + versionId: string; + /** Key in secret's payload, which value to be delivered into function environment */ + key: string; + /** environment variable in which secret's value to be delivered */ + environmentVariable: string | undefined; +} + const baseFunction: object = { $type: "yandex.cloud.serverless.functions.v1.Function", id: "", @@ -615,6 +630,9 @@ export const Version = { writer.uint32(146).fork() ).ldelim(); }); + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(154).fork()).ldelim(); + } return writer; }, @@ -625,6 +643,7 @@ export const Version = { message.tags = []; message.environment = {}; message.namedServiceAccounts = {}; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -690,6 +709,9 @@ export const Version = { message.namedServiceAccounts[entry18.key] = entry18.value; } break; + case 19: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -763,6 +785,9 @@ export const Version = { acc[key] = String(value); return acc; }, {}); + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); return message; }, @@ -812,6 +837,13 @@ export const Version = { obj.namedServiceAccounts[k] = v; }); } + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? 
Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } return obj; }, @@ -856,6 +888,7 @@ export const Version = { } return acc; }, {}); + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; return message; }, }; @@ -1418,6 +1451,102 @@ export const ScalingPolicy = { messageTypeRegistry.set(ScalingPolicy.$type, ScalingPolicy); +const baseSecret: object = { + $type: "yandex.cloud.serverless.functions.v1.Secret", + id: "", + versionId: "", + key: "", +}; + +export const Secret = { + $type: "yandex.cloud.serverless.functions.v1.Secret" as const, + + encode( + message: Secret, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.versionId !== "") { + writer.uint32(18).string(message.versionId); + } + if (message.key !== "") { + writer.uint32(26).string(message.key); + } + if (message.environmentVariable !== undefined) { + writer.uint32(34).string(message.environmentVariable); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Secret { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSecret } as Secret; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.versionId = reader.string(); + break; + case 3: + message.key = reader.string(); + break; + case 4: + message.environmentVariable = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Secret { + const message = { ...baseSecret } as Secret; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.versionId = + object.versionId !== undefined && object.versionId !== null + ? String(object.versionId) + : ""; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.environmentVariable = + object.environmentVariable !== undefined && + object.environmentVariable !== null + ? String(object.environmentVariable) + : undefined; + return message; + }, + + toJSON(message: Secret): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.versionId !== undefined && (obj.versionId = message.versionId); + message.key !== undefined && (obj.key = message.key); + message.environmentVariable !== undefined && + (obj.environmentVariable = message.environmentVariable); + return obj; + }, + + fromPartial, I>>(object: I): Secret { + const message = { ...baseSecret } as Secret; + message.id = object.id ?? ""; + message.versionId = object.versionId ?? ""; + message.key = object.key ?? ""; + message.environmentVariable = object.environmentVariable ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(Secret.$type, Secret); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts index 025ceb3f..8d05039f 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts @@ -21,6 +21,7 @@ import { Function, Version, Package, + Secret, ScalingPolicy, } from "../../../../../yandex/cloud/serverless/functions/v1/function"; import { Duration } from "../../../../../google/protobuf/duration"; @@ -347,6 +348,8 @@ export interface CreateFunctionVersionRequest { connectivity?: Connectivity; /** Additional service accounts to be used by the version. */ namedServiceAccounts: { [key: string]: string }; + /** Lockbox secrets to be used by the version */ + secrets: Secret[]; } export interface CreateFunctionVersionRequest_EnvironmentEntry { @@ -2324,6 +2327,9 @@ export const CreateFunctionVersionRequest = { writer.uint32(122).fork() ).ldelim(); }); + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(146).fork()).ldelim(); + } return writer; }, @@ -2339,6 +2345,7 @@ export const CreateFunctionVersionRequest = { message.environment = {}; message.tag = []; message.namedServiceAccounts = {}; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2397,6 +2404,9 @@ export const CreateFunctionVersionRequest = { message.namedServiceAccounts[entry15.key] = entry15.value; } break; + case 18: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -2466,6 +2476,9 @@ export const CreateFunctionVersionRequest = { acc[key] = String(value); return acc; }, {}); + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); return message; }, @@ -2517,6 +2530,13 @@ export const CreateFunctionVersionRequest = { obj.namedServiceAccounts[k] = v; }); } + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? 
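The functions Secret message documented above describes how one key of a Lockbox secret version is surfaced as an environment variable, and CreateFunctionVersionRequest now accepts a list of such secrets. An illustrative sketch under the same placeholder-ID caveat; the fields other than secrets are ordinary version-creation fields, not introduced by this patch:

import { CreateFunctionVersionRequest } from "./src/generated/yandex/cloud/serverless/functions/v1/function_service";

const versionRequest = CreateFunctionVersionRequest.fromPartial({
  functionId: "d4exxxxxxxxxxxxxxxxx",   // placeholder function ID
  runtime: "nodejs16",
  entrypoint: "index.handler",
  // ...package/content, resources and other version fields omitted for brevity...
  // Expose the `api-key` entry of a Lockbox secret as the API_KEY environment variable.
  secrets: [
    {
      id: "e6qxxxxxxxxxxxxxxxxx",
      versionId: "e6qyyyyyyyyyyyyyyyyy",
      key: "api-key",
      environmentVariable: "API_KEY",
    },
  ],
});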
Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } return obj; }, @@ -2566,6 +2586,7 @@ export const CreateFunctionVersionRequest = { } return acc; }, {}); + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/service_clients.ts b/src/generated/yandex/cloud/service_clients.ts index 07d480a0..255d6f2e 100644 --- a/src/generated/yandex/cloud/service_clients.ts +++ b/src/generated/yandex/cloud/service_clients.ts @@ -3,6 +3,7 @@ export const SttServiceClient = cloudApi.ai.stt_service.SttServiceClient; export const TranslationServiceClient = cloudApi.ai.translate_translation_service.TranslationServiceClient; export const SynthesizerClient = cloudApi.ai.tts_service.SynthesizerClient; export const VisionServiceClient = cloudApi.ai.vision_service.VisionServiceClient; +export const ImageClassifierServiceClient = cloudApi.ai.vision_image_classifier_service.ImageClassifierServiceClient; export const BackendGroupServiceClient = cloudApi.apploadbalancer.backend_group_service.BackendGroupServiceClient; export const HttpRouterServiceClient = cloudApi.apploadbalancer.http_router_service.HttpRouterServiceClient; export const LoadBalancerServiceClient = cloudApi.apploadbalancer.load_balancer_service.LoadBalancerServiceClient; @@ -17,6 +18,7 @@ export const CacheServiceClient = cloudApi.cdn.cache_service.CacheServiceClient; export const OriginGroupServiceClient = cloudApi.cdn.origin_group_service.OriginGroupServiceClient; export const OriginServiceClient = cloudApi.cdn.origin_service.OriginServiceClient; export const ProviderServiceClient = cloudApi.cdn.provider_service.ProviderServiceClient; +export const RawLogsServiceClient = cloudApi.cdn.raw_logs_service.RawLogsServiceClient; export const ResourceServiceClient = cloudApi.cdn.resource_service.ResourceServiceClient; export const CertificateContentServiceClient = cloudApi.certificatemanager.certificate_content_service.CertificateContentServiceClient; export const CertificateServiceClient = cloudApi.certificatemanager.certificate_service.CertificateServiceClient; @@ -86,10 +88,14 @@ export const ClickHouseResourcePresetServiceClient = cloudApi.mdb.clickhouse_res export const ClickHouseUserServiceClient = cloudApi.mdb.clickhouse_user_service.UserServiceClient; export const VersionsServiceClient = cloudApi.mdb.clickhouse_versions_service.VersionsServiceClient; export const AuthServiceClient = cloudApi.mdb.elasticsearch_auth_service.AuthServiceClient; +export const ElasticBackupServiceClient = cloudApi.mdb.elasticsearch_backup_service.BackupServiceClient; export const ElasticClusterServiceClient = cloudApi.mdb.elasticsearch_cluster_service.ClusterServiceClient; +export const ElasticExtensionServiceClient = cloudApi.mdb.elasticsearch_extension_service.ExtensionServiceClient; export const ElasticResourcePresetServiceClient = cloudApi.mdb.elasticsearch_resource_preset_service.ResourcePresetServiceClient; export const ElasticUserServiceClient = cloudApi.mdb.elasticsearch_user_service.UserServiceClient; +export const GreenplumBackupServiceClient = cloudApi.mdb.greenplum_backup_service.BackupServiceClient; export const GreenplumClusterServiceClient = cloudApi.mdb.greenplum_cluster_service.ClusterServiceClient; +export const GreenplumResourcePresetServiceClient = cloudApi.mdb.greenplum_resource_preset_service.ResourcePresetServiceClient; export const KafkaClusterServiceClient = cloudApi.mdb.kafka_cluster_service.ClusterServiceClient; export const 
ConnectorServiceClient = cloudApi.mdb.kafka_connector_service.ConnectorServiceClient; export const KafkaResourcePresetServiceClient = cloudApi.mdb.kafka_resource_preset_service.ResourcePresetServiceClient; @@ -139,4 +145,4 @@ export const YdbBackupServiceClient = cloudApi.ydb.backup_service.BackupServiceC export const YdbDatabaseServiceClient = cloudApi.ydb.database_service.DatabaseServiceClient; export const LocationServiceClient = cloudApi.ydb.location_service.LocationServiceClient; export const YdbResourcePresetServiceClient = cloudApi.ydb.resource_preset_service.ResourcePresetServiceClient; -export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; \ No newline at end of file +export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 726a8726..87164e5b 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -64,7 +64,11 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.mdb.elasticsearch.v1.ClusterService', 'yandex.cloud.mdb.elasticsearch.v1.ResourcePresetService', 'yandex.cloud.mdb.elasticsearch.v1.UserService', + 'yandex.cloud.mdb.elasticsearch.v1.BackupService', + 'yandex.cloud.mdb.elasticsearch.v1.ExtensionService', 'yandex.cloud.mdb.greenplum.v1.ClusterService', + 'yandex.cloud.mdb.greenplum.v1.BackupService', + 'yandex.cloud.mdb.greenplum.v1.ResourcePresetService', 'yandex.cloud.mdb.kafka.v1.ClusterService', 'yandex.cloud.mdb.kafka.v1.ConnectorService', 'yandex.cloud.mdb.kafka.v1.ResourcePresetService', @@ -224,7 +228,10 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ endpoint: 'translate.api.cloud.yandex.net:443', }, { - serviceIds: ['yandex.cloud.ai.vision.v1.VisionService'], + serviceIds: [ + 'yandex.cloud.ai.vision.v1.VisionService', + 'yandex.cloud.ai.vision.v2.ImageClassifierService', + ], endpoint: 'vision.api.cloud.yandex.net:443', }, { @@ -258,6 +265,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.cdn.v1.OriginService', 'yandex.cloud.cdn.v1.ProviderService', 'yandex.cloud.cdn.v1.ResourceService', + 'yandex.cloud.cdn.v1.RawLogsService', ], endpoint: 'cdn.api.cloud.yandex.net:443', }, From ad52e6f129ed1e83983579b98771050ded1ef00f Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 1 Apr 2022 14:03:36 +0300 Subject: [PATCH 11/54] feat: increase max memory level for build --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 2b801f3e..30a36df6 100644 --- a/package.json +++ b/package.json @@ -65,7 +65,7 @@ "scripts": { "test": "jest -c config/jest.ts --passWithNoTests", "lint": "eslint src config", - "build": "tsc -p .", + "build": "NODE_OPTIONS=\"--max-old-space-size=2048\" tsc -p .", "generate-code": "ts-node scripts/generate-code.ts", "prepare": "husky install", "prepublishOnly": "npm run build" From 5ed959230001d50de0271ab1cb135610f3331743 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 1 Apr 2022 14:12:58 +0300 Subject: [PATCH 12/54] fixup! 
feat: increase max memory level for build --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 30a36df6..c9711794 100644 --- a/package.json +++ b/package.json @@ -65,7 +65,7 @@ "scripts": { "test": "jest -c config/jest.ts --passWithNoTests", "lint": "eslint src config", - "build": "NODE_OPTIONS=\"--max-old-space-size=2048\" tsc -p .", + "build": "NODE_OPTIONS=\"--max-old-space-size=4096\" tsc -p .", "generate-code": "ts-node scripts/generate-code.ts", "prepare": "husky install", "prepublishOnly": "npm run build" From 3143b4d9c5b05a59d5d56ff992a5106ac3d1973c Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Fri, 1 Apr 2022 11:16:31 +0000 Subject: [PATCH 13/54] chore(release): 2.0.0-beta.3 [skip ci] # [2.0.0-beta.3](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.0.0-beta.2...v2.0.0-beta.3) (2022-04-01) ### Features * increase max memory level for build ([ad52e6f](https://github.com/yandex-cloud/nodejs-sdk/commit/ad52e6f129ed1e83983579b98771050ded1ef00f)) * update @grpc/grpc-js ([215fab1](https://github.com/yandex-cloud/nodejs-sdk/commit/215fab13cbc7ead95c67f54c1e9f07ea96b69395)) * updated services definitions ([835381c](https://github.com/yandex-cloud/nodejs-sdk/commit/835381c27596f6c3b866b35162d7bab2da94ce6b)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 4ebae0a8..df790f91 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.2", + "version": "2.0.0-beta.3", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.2", + "version": "2.0.0-beta.3", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index 759f0f6b..20f1c83d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.2", + "version": "2.0.0-beta.3", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 0ebefb1b4e673bac1da104df30f2bea48c9d6541 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Fri, 1 Apr 2022 11:20:18 +0000 Subject: [PATCH 14/54] chore(release): 2.0.0 [skip ci] # [2.0.0](https://github.com/yandex-cloud/nodejs-sdk/compare/v1.4.3...v2.0.0) (2022-04-01) ### Bug Fixes * cache node_modules between jobs ([0a9490a](https://github.com/yandex-cloud/nodejs-sdk/commit/0a9490a91fdf201b9a802302849735d3e17686f6)) * correct logging service endpoints ([a712923](https://github.com/yandex-cloud/nodejs-sdk/commit/a71292314fbbd31e6fca6e2931a07db0fd69f647)) * disable husky hooks in CI ([4919d54](https://github.com/yandex-cloud/nodejs-sdk/commit/4919d5488515729ec9fa52dc2317bbc3c2c99b3e)) * eslint rule ([e097134](https://github.com/yandex-cloud/nodejs-sdk/commit/e097134907090e76aebb6b7130bfe0a81609d2ae)) * install long package ([7120858](https://github.com/yandex-cloud/nodejs-sdk/commit/71208586ef1c9163781a84f2ab3e214bd6b61df2)) * move legacy code to separate directory ([b38248c](https://github.com/yandex-cloud/nodejs-sdk/commit/b38248c5a06633366f01f1125e9536088f8e39ae)) * move to axios ([438f7c6](https://github.com/yandex-cloud/nodejs-sdk/commit/438f7c6274f1a71bacb7218961e03b7a3440e002)) * remove prettier ([6bf0f24](https://github.com/yandex-cloud/nodejs-sdk/commit/6bf0f24835f4baaa4d7c36200198d75100ee747a)) * remove unused test module 
([4e90a92](https://github.com/yandex-cloud/nodejs-sdk/commit/4e90a9286c6a72456f8a561cfe851df73167e824)) * removed legacy example ([10c9409](https://github.com/yandex-cloud/nodejs-sdk/commit/10c94090f752f39e708b8884aaf9791f7571df7f)) * set public access for npm package ([a68360d](https://github.com/yandex-cloud/nodejs-sdk/commit/a68360dbb8f6a618cede5bfdd6111ace7dff62e6)) * update ts-proto ([a20a650](https://github.com/yandex-cloud/nodejs-sdk/commit/a20a6503cfbf93841f112be0d068a6ffed0b9136)) * use bot's name/email for pushing release commits ([3b2553a](https://github.com/yandex-cloud/nodejs-sdk/commit/3b2553ae557464f9779ec74eb382ab15662fe85a)) * use bot's token for pushing release commits ([76cf6e7](https://github.com/yandex-cloud/nodejs-sdk/commit/76cf6e7deda2822404c21f6ab17ca826d11bfa3f)) * use exact versions of dependencies ([b564178](https://github.com/yandex-cloud/nodejs-sdk/commit/b564178c3e7e680e93a4c3905f09f41eeb14b0a8)) * use serviceName property of client ctor ([19129f3](https://github.com/yandex-cloud/nodejs-sdk/commit/19129f329cd33583e02bdaead83440d392e44e20)) ### Features * added git submodule with yandex cloud api specs ([7916612](https://github.com/yandex-cloud/nodejs-sdk/commit/79166129c9deb43328983d15ad905915b1c4249b)) * allow to override endpoint in service factory ([12e118e](https://github.com/yandex-cloud/nodejs-sdk/commit/12e118e371003bb9082227688a9d6b428e2d650a)) * change package description ([e9ecf16](https://github.com/yandex-cloud/nodejs-sdk/commit/e9ecf16d60448e68e4adbfd89e7d87506cbc684a)) * endpoints list for all available service clients ([78c2355](https://github.com/yandex-cloud/nodejs-sdk/commit/78c235530e00823de2878d59f2b3b86abce12f6b)) * export generic type of wrapped service client ([bec2aca](https://github.com/yandex-cloud/nodejs-sdk/commit/bec2acaa282130238cd2d91ecfebabca0379a0b2)) * generate exports for all entities in cloud api ([5136bb5](https://github.com/yandex-cloud/nodejs-sdk/commit/5136bb5930395760c7f46c50a69c85d681d82ea8)) * increase max memory level for build ([ad52e6f](https://github.com/yandex-cloud/nodejs-sdk/commit/ad52e6f129ed1e83983579b98771050ded1ef00f)) * operation utilities ([e82e279](https://github.com/yandex-cloud/nodejs-sdk/commit/e82e27990533aeab73a9fb298cff888e5f422e1e)) * regenerate code with new ts-proto ([1e2dcc2](https://github.com/yandex-cloud/nodejs-sdk/commit/1e2dcc2bbb4a7614f23643c3520bd04d702920af)) * remove old generated code ([a5ecb33](https://github.com/yandex-cloud/nodejs-sdk/commit/a5ecb335acaa7e2dbbcc6beb2b00040cb82182d4)) * remove unused legacy code and dependencies ([3d3a6f2](https://github.com/yandex-cloud/nodejs-sdk/commit/3d3a6f2fea427d08571f6a5c443efed80ea2e0c0)) * restructure directories ([55bf685](https://github.com/yandex-cloud/nodejs-sdk/commit/55bf6857e2b4ce4226129775acf18b7818375560)) * reworked examples ([2875275](https://github.com/yandex-cloud/nodejs-sdk/commit/2875275a44411295469211f948c343123072e7cf)) * rewrite index module in ts ([99e8ba8](https://github.com/yandex-cloud/nodejs-sdk/commit/99e8ba8b913e5943a0125a81df020e50a79760cf)) * session class ([826e6de](https://github.com/yandex-cloud/nodejs-sdk/commit/826e6de7198d4cc00d33084a76454ad645cf6708)) * token services ([8b730df](https://github.com/yandex-cloud/nodejs-sdk/commit/8b730dfbeac5477ef49f0af5b697d1c9cdd0b157)) * tool for code generation ([b463cf8](https://github.com/yandex-cloud/nodejs-sdk/commit/b463cf8e44fcbcddd99c92337c1a9cca665399a1)) * update @grpc/grpc-js 
([215fab1](https://github.com/yandex-cloud/nodejs-sdk/commit/215fab13cbc7ead95c67f54c1e9f07ea96b69395)) * update nice-grpc ([22ea100](https://github.com/yandex-cloud/nodejs-sdk/commit/22ea1005f4fb19e9b4bc9c38724a2f659cdc9d3d)) * updated services definitions ([835381c](https://github.com/yandex-cloud/nodejs-sdk/commit/835381c27596f6c3b866b35162d7bab2da94ce6b)) ### BREAKING CHANGES * removed some useless classes * changed API of Session * changed API of service clients --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index df790f91..668f2f31 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.3", + "version": "2.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.3", + "version": "2.0.0", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index 20f1c83d..519ea938 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0-beta.3", + "version": "2.0.0", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From f7428d2c4a3ea22cbb9fd8cb2977ab8519425467 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 11:21:47 +0000 Subject: [PATCH 15/54] chore(deps): bump node-fetch from 2.6.6 to 2.6.7 Bumps [node-fetch](https://github.com/node-fetch/node-fetch) from 2.6.6 to 2.6.7. - [Release notes](https://github.com/node-fetch/node-fetch/releases) - [Commits](https://github.com/node-fetch/node-fetch/compare/v2.6.6...v2.6.7) --- updated-dependencies: - dependency-name: node-fetch dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/package-lock.json b/package-lock.json index 668f2f31..0fec9e55 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7452,15 +7452,23 @@ } }, "node_modules/node-fetch": { - "version": "2.6.6", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.6.tgz", - "integrity": "sha512-Z8/6vRlTUChSdIgMa51jxQ4lrw/Jy5SOW10ObaA47/RElsAN2c5Pn8bTgFGWn/ibwzXTE8qwr1Yzx28vsecXEA==", + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", "dev": true, "dependencies": { "whatwg-url": "^5.0.0" }, "engines": { "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } } }, "node_modules/node-int64": { @@ -18581,9 +18589,9 @@ } }, "node-fetch": { - "version": "2.6.6", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.6.tgz", - "integrity": "sha512-Z8/6vRlTUChSdIgMa51jxQ4lrw/Jy5SOW10ObaA47/RElsAN2c5Pn8bTgFGWn/ibwzXTE8qwr1Yzx28vsecXEA==", + "version": "2.6.7", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", "dev": true, "requires": { "whatwg-url": "^5.0.0" From b16b6c4fc2b30a9e48075bf72e9a1b7684c7832c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 11:21:02 +0000 Subject: [PATCH 16/54] chore(deps): bump follow-redirects from 1.14.7 to 1.14.9 in /examples Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.14.7 to 1.14.9. - [Release notes](https://github.com/follow-redirects/follow-redirects/releases) - [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.14.7...v1.14.9) --- updated-dependencies: - dependency-name: follow-redirects dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- examples/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/package-lock.json b/examples/package-lock.json index 3af69824..5390f16d 100644 --- a/examples/package-lock.json +++ b/examples/package-lock.json @@ -385,9 +385,9 @@ "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" }, "node_modules/follow-redirects": { - "version": "1.14.7", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz", - "integrity": "sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==", + "version": "1.14.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", + "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==", "funding": [ { "type": "individual", @@ -1142,9 +1142,9 @@ "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" }, "follow-redirects": { - "version": "1.14.7", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.7.tgz", - "integrity": "sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ==" + "version": "1.14.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", + "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==" }, "fs-extra": { "version": "8.1.0", From a746ab96d220491d9e8cba167b7647e0ee25eac2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 11:31:01 +0000 Subject: [PATCH 17/54] chore(deps): bump follow-redirects from 1.14.6 to 1.14.9 Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.14.6 to 1.14.9. - [Release notes](https://github.com/follow-redirects/follow-redirects/releases) - [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.14.6...v1.14.9) --- updated-dependencies: - dependency-name: follow-redirects dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/package-lock.json b/package-lock.json index 0fec9e55..0368bd92 100644 --- a/package-lock.json +++ b/package-lock.json @@ -4771,9 +4771,9 @@ "dev": true }, "node_modules/follow-redirects": { - "version": "1.14.6", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.6.tgz", - "integrity": "sha512-fhUl5EwSJbbl8AR+uYL2KQDxLkdSjZGR36xy46AO7cOMTrCMON6Sa28FmAnC2tRTDbd/Uuzz3aJBv7EBN7JH8A==", + "version": "1.14.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", + "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==", "funding": [ { "type": "individual", @@ -16521,9 +16521,9 @@ "dev": true }, "follow-redirects": { - "version": "1.14.6", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.6.tgz", - "integrity": "sha512-fhUl5EwSJbbl8AR+uYL2KQDxLkdSjZGR36xy46AO7cOMTrCMON6Sa28FmAnC2tRTDbd/Uuzz3aJBv7EBN7JH8A==" + "version": "1.14.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", + "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==" }, "form-data": { "version": "3.0.1", From 483211c4c67aa613c6635d9ae7d9ef165743577a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 11:43:01 +0000 Subject: [PATCH 18/54] chore(deps): bump log4js from 6.3.0 to 6.4.0 Bumps [log4js](https://github.com/log4js-node/log4js-node) from 6.3.0 to 6.4.0. - [Release notes](https://github.com/log4js-node/log4js-node/releases) - [Changelog](https://github.com/log4js-node/log4js-node/blob/master/CHANGELOG.md) - [Commits](https://github.com/log4js-node/log4js-node/compare/v6.3.0...v6.4.0) --- updated-dependencies: - dependency-name: log4js dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 379 ++++++++++++---------------------------------- package.json | 2 +- 2 files changed, 99 insertions(+), 282 deletions(-) diff --git a/package-lock.json b/package-lock.json index 0368bd92..8496351f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,7 +13,7 @@ "axios": "0.24.0", "jsonwebtoken": "8.5.1", "lodash": "4.17.21", - "log4js": "6.3.0", + "log4js": "6.4.0", "long": "5.2.0", "luxon": "2.2.0", "nice-grpc": "1.0.6", @@ -848,41 +848,6 @@ "node": ">=v12" } }, - "node_modules/@commitlint/read/node_modules/fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@commitlint/read/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@commitlint/read/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@commitlint/resolve-extends": { "version": "15.0.0", "resolved": "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-15.0.0.tgz", @@ -1853,20 +1818,6 @@ "node": ">= 10" } }, - "node_modules/@semantic-release/github/node_modules/fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, "node_modules/@semantic-release/github/node_modules/http-proxy-agent": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", @@ -1881,27 +1832,6 @@ "node": ">= 6" } }, - "node_modules/@semantic-release/github/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@semantic-release/github/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@semantic-release/npm": { "version": "8.0.3", "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-8.0.3.tgz", @@ -1929,41 +1859,6 @@ "semantic-release": ">=18.0.0" } }, - 
"node_modules/@semantic-release/npm/node_modules/fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@semantic-release/npm/node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/@semantic-release/npm/node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/@semantic-release/release-notes-generator": { "version": "10.0.3", "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-10.0.3.tgz", @@ -3464,9 +3359,9 @@ "dev": true }, "node_modules/date-format": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz", - "integrity": "sha512-eyTcpKOcamdhWJXj56DpQMo1ylSQpcGtGKXcU0Tb97+K56/CF5amAqqqNj0+KvA0iw2ynxtHWFsPDSClCxe48w==", + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/date-format/-/date-format-4.0.6.tgz", + "integrity": "sha512-B9vvg5rHuQ8cbUXE/RMWMyX2YA5TecT3jKF5fLtGNlzPlU7zblSPmAm2OImDbWL+LDOQ6pUm+4LOFz+ywS41Zw==", "engines": { "node": ">=4.0" } @@ -3481,9 +3376,9 @@ } }, "node_modules/debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dependencies": { "ms": "2.1.2" }, @@ -4767,8 +4662,7 @@ "node_modules/flatted": { "version": "3.2.4", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.4.tgz", - "integrity": "sha512-8/sOawo8tJ4QOBX8YlQBMxL8+RLZfxMQOif9o0KUKTNTjMYElWPE0r/m5VNFxTRd0NSw8qSy8dajrwX4RYI1Hw==", - "dev": true + "integrity": "sha512-8/sOawo8tJ4QOBX8YlQBMxL8+RLZfxMQOif9o0KUKTNTjMYElWPE0r/m5VNFxTRd0NSw8qSy8dajrwX4RYI1Hw==" }, "node_modules/follow-redirects": { "version": "1.14.9", @@ -4864,16 +4758,24 @@ ] }, "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", + "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", "dependencies": { "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": ">=6 <7 || >=8" + "node": ">=12" + } + }, + 
"node_modules/fs-extra/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" } }, "node_modules/fs-minipass": { @@ -6734,13 +6636,24 @@ } }, "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, + "node_modules/jsonfile/node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, "node_modules/jsonparse": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", @@ -7025,25 +6938,20 @@ "dev": true }, "node_modules/log4js": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.3.0.tgz", - "integrity": "sha512-Mc8jNuSFImQUIateBFwdOQcmC6Q5maU0VVvdC2R6XMb66/VnT+7WS4D/0EeNMZu1YODmJe5NIn2XftCzEocUgw==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.4.0.tgz", + "integrity": "sha512-ysc/XUecZJuN8NoKOssk3V0cQ29xY4fra6fnigZa5VwxFsCsvdqsdnEuAxNN89LlHpbE4KUD3zGcn+kFqonSVQ==", "dependencies": { - "date-format": "^3.0.0", - "debug": "^4.1.1", - "flatted": "^2.0.1", - "rfdc": "^1.1.4", - "streamroller": "^2.2.4" + "date-format": "^4.0.3", + "debug": "^4.3.3", + "flatted": "^3.2.4", + "rfdc": "^1.3.0", + "streamroller": "^3.0.2" }, "engines": { "node": ">=8.0" } }, - "node_modules/log4js/node_modules/flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" - }, "node_modules/long": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/long/-/long-5.2.0.tgz", @@ -11829,26 +11737,18 @@ } }, "node_modules/streamroller": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz", - "integrity": "sha512-OG79qm3AujAM9ImoqgWEY1xG4HX+Lw+yY6qZj9R1K2mhF5bEmQ849wvrb+4vt4jLMLzwXttJlQbOdPOQVRv7DQ==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-3.0.6.tgz", + "integrity": "sha512-Qz32plKq/MZywYyhEatxyYc8vs994Gz0Hu2MSYXXLD233UyPeIeRBZARIIGwFer4Mdb8r3Y2UqKkgyDghM6QCg==", "dependencies": { - "date-format": "^2.1.0", - "debug": "^4.1.1", - "fs-extra": "^8.1.0" + "date-format": "^4.0.6", + "debug": "^4.3.4", + "fs-extra": "^10.0.1" }, "engines": { "node": ">=8.0" } }, - "node_modules/streamroller/node_modules/date-format": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-2.1.0.tgz", - "integrity": "sha512-bYQuGLeFxhkxNOF3rcMtiZxvCBAquGzZm6oWA1oZ0g2THUzivaRhv8uOhdr19LmoobSOLoIAxeUK2RdbM8IFTA==", - "engines": { - "node": ">=4.0" - } - }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -12482,6 +12382,7 @@ "version": "0.1.2", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true, "engines": { "node": ">= 4.0.0" } @@ -13427,35 +13328,6 @@ "@commitlint/types": "^15.0.0", "fs-extra": "^10.0.0", "git-raw-commits": "^2.0.0" - }, - "dependencies": { - "fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" - } - }, - "universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true - } } }, "@commitlint/resolve-extends": { @@ -14233,17 +14105,6 @@ "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", "dev": true }, - "fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, "http-proxy-agent": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", @@ -14254,22 +14115,6 @@ "agent-base": "6", "debug": "4" } - }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" - } - }, - "universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true } } }, @@ -14292,35 +14137,6 @@ "registry-auth-token": "^4.0.0", "semver": "^7.1.2", "tempy": "^1.0.0" - }, - "dependencies": { - "fs-extra": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz", - "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" - } - }, - "universalify": { - "version": "2.0.0", - 
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true - } } }, "@semantic-release/release-notes-generator": { @@ -15510,9 +15326,9 @@ "dev": true }, "date-format": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz", - "integrity": "sha512-eyTcpKOcamdhWJXj56DpQMo1ylSQpcGtGKXcU0Tb97+K56/CF5amAqqqNj0+KvA0iw2ynxtHWFsPDSClCxe48w==" + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/date-format/-/date-format-4.0.6.tgz", + "integrity": "sha512-B9vvg5rHuQ8cbUXE/RMWMyX2YA5TecT3jKF5fLtGNlzPlU7zblSPmAm2OImDbWL+LDOQ6pUm+4LOFz+ywS41Zw==" }, "dateformat": { "version": "3.0.3", @@ -15521,9 +15337,9 @@ "dev": true }, "debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "requires": { "ms": "2.1.2" } @@ -16517,8 +16333,7 @@ "flatted": { "version": "3.2.4", "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.4.tgz", - "integrity": "sha512-8/sOawo8tJ4QOBX8YlQBMxL8+RLZfxMQOif9o0KUKTNTjMYElWPE0r/m5VNFxTRd0NSw8qSy8dajrwX4RYI1Hw==", - "dev": true + "integrity": "sha512-8/sOawo8tJ4QOBX8YlQBMxL8+RLZfxMQOif9o0KUKTNTjMYElWPE0r/m5VNFxTRd0NSw8qSy8dajrwX4RYI1Hw==" }, "follow-redirects": { "version": "1.14.9", @@ -16585,13 +16400,20 @@ "dev": true }, "fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.1.tgz", + "integrity": "sha512-NbdoVMZso2Lsrn/QwLXOy6rm0ufY2zEOKCDzJR/0kBsb0E6qed0P3iYK+Ath3BfvXEeu4JhEtXLgILx5psUfag==", "requires": { "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "dependencies": { + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } } }, "fs-minipass": { @@ -17996,11 +17818,19 @@ } }, "jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", "requires": { - "graceful-fs": "^4.1.6" + "graceful-fs": "^4.1.6", + "universalify": "^2.0.0" + }, + "dependencies": { + "universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==" + } } }, "jsonparse": { @@ -18248,22 +18078,15 @@ "dev": true }, "log4js": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.3.0.tgz", - "integrity": 
"sha512-Mc8jNuSFImQUIateBFwdOQcmC6Q5maU0VVvdC2R6XMb66/VnT+7WS4D/0EeNMZu1YODmJe5NIn2XftCzEocUgw==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.4.0.tgz", + "integrity": "sha512-ysc/XUecZJuN8NoKOssk3V0cQ29xY4fra6fnigZa5VwxFsCsvdqsdnEuAxNN89LlHpbE4KUD3zGcn+kFqonSVQ==", "requires": { - "date-format": "^3.0.0", - "debug": "^4.1.1", - "flatted": "^2.0.1", - "rfdc": "^1.1.4", - "streamroller": "^2.2.4" - }, - "dependencies": { - "flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" - } + "date-format": "^4.0.3", + "debug": "^4.3.3", + "flatted": "^3.2.4", + "rfdc": "^1.3.0", + "streamroller": "^3.0.2" } }, "long": { @@ -21778,20 +21601,13 @@ } }, "streamroller": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz", - "integrity": "sha512-OG79qm3AujAM9ImoqgWEY1xG4HX+Lw+yY6qZj9R1K2mhF5bEmQ849wvrb+4vt4jLMLzwXttJlQbOdPOQVRv7DQ==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-3.0.6.tgz", + "integrity": "sha512-Qz32plKq/MZywYyhEatxyYc8vs994Gz0Hu2MSYXXLD233UyPeIeRBZARIIGwFer4Mdb8r3Y2UqKkgyDghM6QCg==", "requires": { - "date-format": "^2.1.0", - "debug": "^4.1.1", - "fs-extra": "^8.1.0" - }, - "dependencies": { - "date-format": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-2.1.0.tgz", - "integrity": "sha512-bYQuGLeFxhkxNOF3rcMtiZxvCBAquGzZm6oWA1oZ0g2THUzivaRhv8uOhdr19LmoobSOLoIAxeUK2RdbM8IFTA==" - } + "date-format": "^4.0.6", + "debug": "^4.3.4", + "fs-extra": "^10.0.1" } }, "string_decoder": { @@ -22260,7 +22076,8 @@ "universalify": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" + "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "dev": true }, "uri-js": { "version": "4.4.1", diff --git a/package.json b/package.json index 519ea938..6b355097 100644 --- a/package.json +++ b/package.json @@ -26,7 +26,7 @@ "axios": "0.24.0", "jsonwebtoken": "8.5.1", "lodash": "4.17.21", - "log4js": "6.3.0", + "log4js": "6.4.0", "long": "5.2.0", "luxon": "2.2.0", "nice-grpc": "1.0.6", From b7621e40e1081129342e502d7999278f54fea622 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 15 Apr 2022 15:03:10 +0300 Subject: [PATCH 19/54] fix: correct endpoint for PayloadService --- src/service-endpoints.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 87164e5b..972c4394 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -301,11 +301,16 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ }, { serviceIds: [ - 'yandex.cloud.lockbox.v1.PayloadService', 'yandex.cloud.lockbox.v1.SecretService', ], endpoint: 'lockbox.api.cloud.yandex.net:443', }, + { + serviceIds: [ + 'yandex.cloud.lockbox.v1.PayloadService', + ], + endpoint: 'payload.lockbox.api.cloud.yandex.net:443', + }, { serviceIds: [ 'yandex.cloud.marketplace.v1.metering.ImageProductUsageService', From 278e80c8cb4998a6f8b0977550f6603e4ae4c80c Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Fri, 15 Apr 2022 12:07:40 +0000 Subject: [PATCH 20/54] chore(release): 2.0.1 [skip ci] ## 
[2.0.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.0.0...v2.0.1) (2022-04-15) ### Bug Fixes * correct endpoint for PayloadService ([b7621e4](https://github.com/yandex-cloud/nodejs-sdk/commit/b7621e40e1081129342e502d7999278f54fea622)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 8496351f..9e336c0f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0", + "version": "2.0.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0", + "version": "2.0.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index 6b355097..cedc2b95 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.0", + "version": "2.0.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From e9fdac62a3ff2998c406d081d02d124ff36df632 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Mon, 25 Apr 2022 17:05:43 +0300 Subject: [PATCH 21/54] feat: update code according to latest proto specs - vpc.gateway_service - storage.bucket_service --- cloudapi | 2 +- scripts/services.ts | 4 + .../ai/translate/v2/translation_service.ts | 14 + src/generated/yandex/cloud/ai/tts/v3/tts.ts | 18 + .../yandex/cloud/compute/v1/host_type.ts | 31 + .../yandex/cloud/datatransfer/index.ts | 2 + .../yandex/cloud/datatransfer/v1/endpoint.ts | 102 + .../datatransfer/v1/endpoint/clickhouse.ts | 1300 ++++++ .../cloud/datatransfer/v1/endpoint/common.ts | 148 + .../cloud/datatransfer/v1/endpoint/mongo.ts | 788 ++++ .../cloud/datatransfer/v1/endpoint/mysql.ts | 61 +- .../datatransfer/v1/endpoint/postgres.ts | 64 +- src/generated/yandex/cloud/index.ts | 1 + .../cloud/logging/v1/log_reading_service.ts | 20 + .../cloud/mdb/greenplum/v1/cluster_service.ts | 372 ++ .../yandex/cloud/mdb/mysql/v1/cluster.ts | 19 + .../cloud/mdb/mysql/v1/cluster_service.ts | 55 + .../cloud/mdb/mysql/v1/config/mysql5_7.ts | 29 + .../cloud/mdb/mysql/v1/config/mysql8_0.ts | 29 + .../yandex/cloud/mdb/postgresql/v1/cluster.ts | 19 + .../mdb/postgresql/v1/cluster_service.ts | 55 + .../yandex/cloud/mdb/sqlserver/v1/cluster.ts | 16 + .../cloud/mdb/sqlserver/v1/cluster_service.ts | 258 ++ .../mdb/sqlserver/v1/database_service.ts | 840 +++- .../containers/v1/container_service.ts | 218 + src/generated/yandex/cloud/service_clients.ts | 2 + src/generated/yandex/cloud/storage/index.ts | 2 + .../yandex/cloud/storage/v1/bucket.ts | 3760 +++++++++++++++++ .../yandex/cloud/storage/v1/bucket_service.ts | 2229 ++++++++++ src/generated/yandex/cloud/vpc/index.ts | 2 + src/generated/yandex/cloud/vpc/v1/gateway.ts | 394 ++ .../yandex/cloud/vpc/v1/gateway_service.ts | 1939 +++++++++ .../yandex/cloud/ydb/v1/backup_service.ts | 103 + .../yandex/cloud/ydb/v1/database_service.ts | 103 + src/service-endpoints.ts | 7 + 35 files changed, 12884 insertions(+), 122 deletions(-) create mode 100644 src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts create mode 100644 src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts create mode 100644 src/generated/yandex/cloud/storage/index.ts create mode 100644 src/generated/yandex/cloud/storage/v1/bucket.ts create mode 100644 src/generated/yandex/cloud/storage/v1/bucket_service.ts create mode 100644 src/generated/yandex/cloud/vpc/v1/gateway.ts create mode 100644 
src/generated/yandex/cloud/vpc/v1/gateway_service.ts diff --git a/cloudapi b/cloudapi index 3cd72656..532f96d2 160000 --- a/cloudapi +++ b/cloudapi @@ -1 +1 @@ -Subproject commit 3cd726562074d55546973cb075bc03754fdedc99 +Subproject commit 532f96d2834997d04cf0e50a7e65d99a88a85a89 diff --git a/scripts/services.ts b/scripts/services.ts index a0fa8594..8240dd51 100644 --- a/scripts/services.ts +++ b/scripts/services.ts @@ -187,12 +187,16 @@ export const servicesConfig: ServicesConfig = { mdbproxy_proxy_service: { importClassName: 'ProxyServiceClient' }, triggers_trigger_service: { importClassName: 'TriggerServiceClient' }, }, + storage: { + bucket_service: { importClassName: 'BucketServiceClient' }, + }, vpc: { address_service: { importClassName: 'AddressServiceClient' }, network_service: { importClassName: 'NetworkServiceClient' }, route_table_service: { importClassName: 'RouteTableServiceClient' }, security_group_service: { importClassName: 'SecurityGroupServiceClient' }, subnet_service: { importClassName: 'SubnetServiceClient' }, + gateway_service: { importClassName: 'GatewayServiceClient' }, }, ydb: { backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'YdbBackupServiceClient' }, diff --git a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts index 35032682..2e228843 100644 --- a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts +++ b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts @@ -123,6 +123,7 @@ export interface GlossaryPair { sourceText: string; /** Text in the target language. */ translatedText: string; + exact: boolean; } export interface TranslateResponse { @@ -500,6 +501,7 @@ const baseGlossaryPair: object = { $type: "yandex.cloud.ai.translate.v2.GlossaryPair", sourceText: "", translatedText: "", + exact: false, }; export const GlossaryPair = { @@ -515,6 +517,9 @@ export const GlossaryPair = { if (message.translatedText !== "") { writer.uint32(18).string(message.translatedText); } + if (message.exact === true) { + writer.uint32(24).bool(message.exact); + } return writer; }, @@ -531,6 +536,9 @@ export const GlossaryPair = { case 2: message.translatedText = reader.string(); break; + case 3: + message.exact = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -549,6 +557,10 @@ export const GlossaryPair = { object.translatedText !== undefined && object.translatedText !== null ? String(object.translatedText) : ""; + message.exact = + object.exact !== undefined && object.exact !== null + ? Boolean(object.exact) + : false; return message; }, @@ -557,6 +569,7 @@ export const GlossaryPair = { message.sourceText !== undefined && (obj.sourceText = message.sourceText); message.translatedText !== undefined && (obj.translatedText = message.translatedText); + message.exact !== undefined && (obj.exact = message.exact); return obj; }, @@ -566,6 +579,7 @@ export const GlossaryPair = { const message = { ...baseGlossaryPair } as GlossaryPair; message.sourceText = object.sourceText ?? ""; message.translatedText = object.translatedText ?? ""; + message.exact = object.exact ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/tts/v3/tts.ts b/src/generated/yandex/cloud/ai/tts/v3/tts.ts index 8db6f39d..2e03f1f5 100644 --- a/src/generated/yandex/cloud/ai/tts/v3/tts.ts +++ b/src/generated/yandex/cloud/ai/tts/v3/tts.ts @@ -191,6 +191,11 @@ export interface Hints { export interface UtteranceSynthesisRequest { $type: "speechkit.tts.v3.UtteranceSynthesisRequest"; + /** + * The name of the model. + * Specifies basic synthesis functionality. Currently should be empty. Do not use it + */ + model: string; /** Raw text (e.g. "Hello, Alice"). */ text: string | undefined; /** Text template instance, e.g. `{"Hello, {username}" with username="Alice"}`. */ @@ -1177,6 +1182,7 @@ messageTypeRegistry.set(Hints.$type, Hints); const baseUtteranceSynthesisRequest: object = { $type: "speechkit.tts.v3.UtteranceSynthesisRequest", + model: "", loudnessNormalizationType: 0, unsafeMode: false, }; @@ -1188,6 +1194,9 @@ export const UtteranceSynthesisRequest = { message: UtteranceSynthesisRequest, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { + if (message.model !== "") { + writer.uint32(10).string(message.model); + } if (message.text !== undefined) { writer.uint32(18).string(message.text); } @@ -1228,6 +1237,9 @@ export const UtteranceSynthesisRequest = { while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { + case 1: + message.model = reader.string(); + break; case 2: message.text = reader.string(); break; @@ -1261,6 +1273,10 @@ export const UtteranceSynthesisRequest = { const message = { ...baseUtteranceSynthesisRequest, } as UtteranceSynthesisRequest; + message.model = + object.model !== undefined && object.model !== null + ? String(object.model) + : ""; message.text = object.text !== undefined && object.text !== null ? String(object.text) @@ -1290,6 +1306,7 @@ export const UtteranceSynthesisRequest = { toJSON(message: UtteranceSynthesisRequest): unknown { const obj: any = {}; + message.model !== undefined && (obj.model = message.model); message.text !== undefined && (obj.text = message.text); message.textTemplate !== undefined && (obj.textTemplate = message.textTemplate @@ -1319,6 +1336,7 @@ export const UtteranceSynthesisRequest = { const message = { ...baseUtteranceSynthesisRequest, } as UtteranceSynthesisRequest; + message.model = object.model ?? ""; message.text = object.text ?? undefined; message.textTemplate = object.textTemplate !== undefined && object.textTemplate !== null diff --git a/src/generated/yandex/cloud/compute/v1/host_type.ts b/src/generated/yandex/cloud/compute/v1/host_type.ts index 46f28e32..0a62b00d 100644 --- a/src/generated/yandex/cloud/compute/v1/host_type.ts +++ b/src/generated/yandex/cloud/compute/v1/host_type.ts @@ -18,6 +18,10 @@ export interface HostType { cores: number; /** Ammount of memory available for instances. 
*/ memory: number; + /** Number of local disks available for instances */ + disks: number; + /** Size of each local disk */ + diskSize: number; } const baseHostType: object = { @@ -25,6 +29,8 @@ const baseHostType: object = { id: "", cores: 0, memory: 0, + disks: 0, + diskSize: 0, }; export const HostType = { @@ -43,6 +49,12 @@ export const HostType = { if (message.memory !== 0) { writer.uint32(24).int64(message.memory); } + if (message.disks !== 0) { + writer.uint32(32).int64(message.disks); + } + if (message.diskSize !== 0) { + writer.uint32(40).int64(message.diskSize); + } return writer; }, @@ -62,6 +74,12 @@ export const HostType = { case 3: message.memory = longToNumber(reader.int64() as Long); break; + case 4: + message.disks = longToNumber(reader.int64() as Long); + break; + case 5: + message.diskSize = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -82,6 +100,14 @@ export const HostType = { object.memory !== undefined && object.memory !== null ? Number(object.memory) : 0; + message.disks = + object.disks !== undefined && object.disks !== null + ? Number(object.disks) + : 0; + message.diskSize = + object.diskSize !== undefined && object.diskSize !== null + ? Number(object.diskSize) + : 0; return message; }, @@ -90,6 +116,9 @@ export const HostType = { message.id !== undefined && (obj.id = message.id); message.cores !== undefined && (obj.cores = Math.round(message.cores)); message.memory !== undefined && (obj.memory = Math.round(message.memory)); + message.disks !== undefined && (obj.disks = Math.round(message.disks)); + message.diskSize !== undefined && + (obj.diskSize = Math.round(message.diskSize)); return obj; }, @@ -98,6 +127,8 @@ export const HostType = { message.id = object.id ?? ""; message.cores = object.cores ?? 0; message.memory = object.memory ?? 0; + message.disks = object.disks ?? 0; + message.diskSize = object.diskSize ?? 
0; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/index.ts b/src/generated/yandex/cloud/datatransfer/index.ts index cbd8ff56..84b4cf4d 100644 --- a/src/generated/yandex/cloud/datatransfer/index.ts +++ b/src/generated/yandex/cloud/datatransfer/index.ts @@ -2,6 +2,8 @@ export * as endpoint from './v1/endpoint' export * as endpoint_service from './v1/endpoint_service' export * as transfer from './v1/transfer' export * as transfer_service from './v1/transfer_service' +export * as clickhouse from './v1/endpoint/clickhouse' export * as common from './v1/endpoint/common' +export * as mongo from './v1/endpoint/mongo' export * as mysql from './v1/endpoint/mysql' export * as postgres from './v1/endpoint/postgres' \ No newline at end of file diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts index b9846e96..55aa40a5 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint.ts @@ -10,6 +10,14 @@ import { PostgresSource, PostgresTarget, } from "../../../../yandex/cloud/datatransfer/v1/endpoint/postgres"; +import { + MongoSource, + MongoTarget, +} from "../../../../yandex/cloud/datatransfer/v1/endpoint/mongo"; +import { + ClickhouseSource, + ClickhouseTarget, +} from "../../../../yandex/cloud/datatransfer/v1/endpoint/clickhouse"; export const protobufPackage = "yandex.cloud.datatransfer.v1"; @@ -33,8 +41,12 @@ export interface EndpointSettings { $type: "yandex.cloud.datatransfer.v1.EndpointSettings"; mysqlSource?: MysqlSource | undefined; postgresSource?: PostgresSource | undefined; + mongoSource?: MongoSource | undefined; + clickhouseSource?: ClickhouseSource | undefined; mysqlTarget?: MysqlTarget | undefined; postgresTarget?: PostgresTarget | undefined; + clickhouseTarget?: ClickhouseTarget | undefined; + mongoTarget?: MongoTarget | undefined; } const baseEndpoint: object = { @@ -292,6 +304,18 @@ export const EndpointSettings = { writer.uint32(18).fork() ).ldelim(); } + if (message.mongoSource !== undefined) { + MongoSource.encode( + message.mongoSource, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.clickhouseSource !== undefined) { + ClickhouseSource.encode( + message.clickhouseSource, + writer.uint32(130).fork() + ).ldelim(); + } if (message.mysqlTarget !== undefined) { MysqlTarget.encode( message.mysqlTarget, @@ -304,6 +328,18 @@ export const EndpointSettings = { writer.uint32(818).fork() ).ldelim(); } + if (message.clickhouseTarget !== undefined) { + ClickhouseTarget.encode( + message.clickhouseTarget, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.mongoTarget !== undefined) { + MongoTarget.encode( + message.mongoTarget, + writer.uint32(890).fork() + ).ldelim(); + } return writer; }, @@ -323,6 +359,15 @@ export const EndpointSettings = { reader.uint32() ); break; + case 9: + message.mongoSource = MongoSource.decode(reader, reader.uint32()); + break; + case 16: + message.clickhouseSource = ClickhouseSource.decode( + reader, + reader.uint32() + ); + break; case 101: message.mysqlTarget = MysqlTarget.decode(reader, reader.uint32()); break; @@ -332,6 +377,15 @@ export const EndpointSettings = { reader.uint32() ); break; + case 104: + message.clickhouseTarget = ClickhouseTarget.decode( + reader, + reader.uint32() + ); + break; + case 111: + message.mongoTarget = MongoTarget.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -350,6 +404,14 @@ export const EndpointSettings = { 
object.postgresSource !== undefined && object.postgresSource !== null ? PostgresSource.fromJSON(object.postgresSource) : undefined; + message.mongoSource = + object.mongoSource !== undefined && object.mongoSource !== null + ? MongoSource.fromJSON(object.mongoSource) + : undefined; + message.clickhouseSource = + object.clickhouseSource !== undefined && object.clickhouseSource !== null + ? ClickhouseSource.fromJSON(object.clickhouseSource) + : undefined; message.mysqlTarget = object.mysqlTarget !== undefined && object.mysqlTarget !== null ? MysqlTarget.fromJSON(object.mysqlTarget) @@ -358,6 +420,14 @@ export const EndpointSettings = { object.postgresTarget !== undefined && object.postgresTarget !== null ? PostgresTarget.fromJSON(object.postgresTarget) : undefined; + message.clickhouseTarget = + object.clickhouseTarget !== undefined && object.clickhouseTarget !== null + ? ClickhouseTarget.fromJSON(object.clickhouseTarget) + : undefined; + message.mongoTarget = + object.mongoTarget !== undefined && object.mongoTarget !== null + ? MongoTarget.fromJSON(object.mongoTarget) + : undefined; return message; }, @@ -371,6 +441,14 @@ export const EndpointSettings = { (obj.postgresSource = message.postgresSource ? PostgresSource.toJSON(message.postgresSource) : undefined); + message.mongoSource !== undefined && + (obj.mongoSource = message.mongoSource + ? MongoSource.toJSON(message.mongoSource) + : undefined); + message.clickhouseSource !== undefined && + (obj.clickhouseSource = message.clickhouseSource + ? ClickhouseSource.toJSON(message.clickhouseSource) + : undefined); message.mysqlTarget !== undefined && (obj.mysqlTarget = message.mysqlTarget ? MysqlTarget.toJSON(message.mysqlTarget) @@ -379,6 +457,14 @@ export const EndpointSettings = { (obj.postgresTarget = message.postgresTarget ? PostgresTarget.toJSON(message.postgresTarget) : undefined); + message.clickhouseTarget !== undefined && + (obj.clickhouseTarget = message.clickhouseTarget + ? ClickhouseTarget.toJSON(message.clickhouseTarget) + : undefined); + message.mongoTarget !== undefined && + (obj.mongoTarget = message.mongoTarget + ? MongoTarget.toJSON(message.mongoTarget) + : undefined); return obj; }, @@ -394,6 +480,14 @@ export const EndpointSettings = { object.postgresSource !== undefined && object.postgresSource !== null ? PostgresSource.fromPartial(object.postgresSource) : undefined; + message.mongoSource = + object.mongoSource !== undefined && object.mongoSource !== null + ? MongoSource.fromPartial(object.mongoSource) + : undefined; + message.clickhouseSource = + object.clickhouseSource !== undefined && object.clickhouseSource !== null + ? ClickhouseSource.fromPartial(object.clickhouseSource) + : undefined; message.mysqlTarget = object.mysqlTarget !== undefined && object.mysqlTarget !== null ? MysqlTarget.fromPartial(object.mysqlTarget) @@ -402,6 +496,14 @@ export const EndpointSettings = { object.postgresTarget !== undefined && object.postgresTarget !== null ? PostgresTarget.fromPartial(object.postgresTarget) : undefined; + message.clickhouseTarget = + object.clickhouseTarget !== undefined && object.clickhouseTarget !== null + ? ClickhouseTarget.fromPartial(object.clickhouseTarget) + : undefined; + message.mongoTarget = + object.mongoTarget !== undefined && object.mongoTarget !== null + ? 
MongoTarget.fromPartial(object.mongoTarget) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts new file mode 100644 index 00000000..8b589474 --- /dev/null +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/clickhouse.ts @@ -0,0 +1,1300 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + TLSMode, + Secret, + ColumnValue, + AltName, +} from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; +import { Empty } from "../../../../../google/protobuf/empty"; + +export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; + +export enum ClickhouseCleanupPolicy { + CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED = 0, + CLICKHOUSE_CLEANUP_POLICY_DISABLED = 1, + CLICKHOUSE_CLEANUP_POLICY_DROP = 2, + UNRECOGNIZED = -1, +} + +export function clickhouseCleanupPolicyFromJSON( + object: any +): ClickhouseCleanupPolicy { + switch (object) { + case 0: + case "CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED": + return ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED; + case 1: + case "CLICKHOUSE_CLEANUP_POLICY_DISABLED": + return ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DISABLED; + case 2: + case "CLICKHOUSE_CLEANUP_POLICY_DROP": + return ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DROP; + case -1: + case "UNRECOGNIZED": + default: + return ClickhouseCleanupPolicy.UNRECOGNIZED; + } +} + +export function clickhouseCleanupPolicyToJSON( + object: ClickhouseCleanupPolicy +): string { + switch (object) { + case ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED: + return "CLICKHOUSE_CLEANUP_POLICY_UNSPECIFIED"; + case ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DISABLED: + return "CLICKHOUSE_CLEANUP_POLICY_DISABLED"; + case ClickhouseCleanupPolicy.CLICKHOUSE_CLEANUP_POLICY_DROP: + return "CLICKHOUSE_CLEANUP_POLICY_DROP"; + default: + return "UNKNOWN"; + } +} + +export interface ClickhouseShard { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseShard"; + name: string; + hosts: string[]; +} + +export interface OnPremiseClickhouse { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseClickhouse"; + shards: ClickhouseShard[]; + httpPort: number; + nativePort: number; + tlsMode?: TLSMode; +} + +export interface ClickhouseConnectionOptions { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnectionOptions"; + mdbClusterId: string | undefined; + onPremise?: OnPremiseClickhouse | undefined; + database: string; + user: string; + password?: Secret; +} + +export interface ClickhouseConnection { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnection"; + connectionOptions?: ClickhouseConnectionOptions | undefined; +} + +export interface ClickhouseSharding { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding"; + columnValueHash?: ClickhouseSharding_ColumnValueHash | undefined; + customMapping?: ClickhouseSharding_ColumnValueMapping | undefined; + transferId?: Empty | undefined; +} + +export interface ClickhouseSharding_ColumnValueHash { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueHash"; + columnName: string; +} + +export interface ClickhouseSharding_ColumnValueMapping { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueMapping"; + columnName: string; + mapping: 
ClickhouseSharding_ColumnValueMapping_ValueToShard[]; +} + +export interface ClickhouseSharding_ColumnValueMapping_ValueToShard { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueMapping.ValueToShard"; + columnValue?: ColumnValue; + shardName: string; +} + +export interface ClickhouseSource { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSource"; + connection?: ClickhouseConnection; + subnetId: string; + securityGroups: string[]; + includeTables: string[]; + excludeTables: string[]; +} + +export interface ClickhouseTarget { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseTarget"; + connection?: ClickhouseConnection; + subnetId: string; + securityGroups: string[]; + clickhouseClusterName: string; + altNames: AltName[]; + sharding?: ClickhouseSharding; + cleanupPolicy: ClickhouseCleanupPolicy; +} + +const baseClickhouseShard: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseShard", + name: "", + hosts: "", +}; + +export const ClickhouseShard = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseShard" as const, + + encode( + message: ClickhouseShard, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + for (const v of message.hosts) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClickhouseShard { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClickhouseShard } as ClickhouseShard; + message.hosts = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.hosts.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseShard { + const message = { ...baseClickhouseShard } as ClickhouseShard; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.hosts = (object.hosts ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: ClickhouseShard): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + if (message.hosts) { + obj.hosts = message.hosts.map((e) => e); + } else { + obj.hosts = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ClickhouseShard { + const message = { ...baseClickhouseShard } as ClickhouseShard; + message.name = object.name ?? 
""; + message.hosts = object.hosts?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(ClickhouseShard.$type, ClickhouseShard); + +const baseOnPremiseClickhouse: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseClickhouse", + httpPort: 0, + nativePort: 0, +}; + +export const OnPremiseClickhouse = { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseClickhouse" as const, + + encode( + message: OnPremiseClickhouse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.shards) { + ClickhouseShard.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.httpPort !== 0) { + writer.uint32(24).int64(message.httpPort); + } + if (message.nativePort !== 0) { + writer.uint32(32).int64(message.nativePort); + } + if (message.tlsMode !== undefined) { + TLSMode.encode(message.tlsMode, writer.uint32(66).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OnPremiseClickhouse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOnPremiseClickhouse } as OnPremiseClickhouse; + message.shards = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shards.push(ClickhouseShard.decode(reader, reader.uint32())); + break; + case 3: + message.httpPort = longToNumber(reader.int64() as Long); + break; + case 4: + message.nativePort = longToNumber(reader.int64() as Long); + break; + case 8: + message.tlsMode = TLSMode.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OnPremiseClickhouse { + const message = { ...baseOnPremiseClickhouse } as OnPremiseClickhouse; + message.shards = (object.shards ?? []).map((e: any) => + ClickhouseShard.fromJSON(e) + ); + message.httpPort = + object.httpPort !== undefined && object.httpPort !== null + ? Number(object.httpPort) + : 0; + message.nativePort = + object.nativePort !== undefined && object.nativePort !== null + ? Number(object.nativePort) + : 0; + message.tlsMode = + object.tlsMode !== undefined && object.tlsMode !== null + ? TLSMode.fromJSON(object.tlsMode) + : undefined; + return message; + }, + + toJSON(message: OnPremiseClickhouse): unknown { + const obj: any = {}; + if (message.shards) { + obj.shards = message.shards.map((e) => + e ? ClickhouseShard.toJSON(e) : undefined + ); + } else { + obj.shards = []; + } + message.httpPort !== undefined && + (obj.httpPort = Math.round(message.httpPort)); + message.nativePort !== undefined && + (obj.nativePort = Math.round(message.nativePort)); + message.tlsMode !== undefined && + (obj.tlsMode = message.tlsMode + ? TLSMode.toJSON(message.tlsMode) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): OnPremiseClickhouse { + const message = { ...baseOnPremiseClickhouse } as OnPremiseClickhouse; + message.shards = + object.shards?.map((e) => ClickhouseShard.fromPartial(e)) || []; + message.httpPort = object.httpPort ?? 0; + message.nativePort = object.nativePort ?? 0; + message.tlsMode = + object.tlsMode !== undefined && object.tlsMode !== null + ? 
TLSMode.fromPartial(object.tlsMode) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(OnPremiseClickhouse.$type, OnPremiseClickhouse); + +const baseClickhouseConnectionOptions: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnectionOptions", + database: "", + user: "", +}; + +export const ClickhouseConnectionOptions = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnectionOptions" as const, + + encode( + message: ClickhouseConnectionOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mdbClusterId !== undefined) { + writer.uint32(42).string(message.mdbClusterId); + } + if (message.onPremise !== undefined) { + OnPremiseClickhouse.encode( + message.onPremise, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.database !== "") { + writer.uint32(66).string(message.database); + } + if (message.user !== "") { + writer.uint32(50).string(message.user); + } + if (message.password !== undefined) { + Secret.encode(message.password, writer.uint32(58).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClickhouseConnectionOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseClickhouseConnectionOptions, + } as ClickhouseConnectionOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 5: + message.mdbClusterId = reader.string(); + break; + case 2: + message.onPremise = OnPremiseClickhouse.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.database = reader.string(); + break; + case 6: + message.user = reader.string(); + break; + case 7: + message.password = Secret.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseConnectionOptions { + const message = { + ...baseClickhouseConnectionOptions, + } as ClickhouseConnectionOptions; + message.mdbClusterId = + object.mdbClusterId !== undefined && object.mdbClusterId !== null + ? String(object.mdbClusterId) + : undefined; + message.onPremise = + object.onPremise !== undefined && object.onPremise !== null + ? OnPremiseClickhouse.fromJSON(object.onPremise) + : undefined; + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.user = + object.user !== undefined && object.user !== null + ? String(object.user) + : ""; + message.password = + object.password !== undefined && object.password !== null + ? Secret.fromJSON(object.password) + : undefined; + return message; + }, + + toJSON(message: ClickhouseConnectionOptions): unknown { + const obj: any = {}; + message.mdbClusterId !== undefined && + (obj.mdbClusterId = message.mdbClusterId); + message.onPremise !== undefined && + (obj.onPremise = message.onPremise + ? OnPremiseClickhouse.toJSON(message.onPremise) + : undefined); + message.database !== undefined && (obj.database = message.database); + message.user !== undefined && (obj.user = message.user); + message.password !== undefined && + (obj.password = message.password + ? 
Secret.toJSON(message.password) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClickhouseConnectionOptions { + const message = { + ...baseClickhouseConnectionOptions, + } as ClickhouseConnectionOptions; + message.mdbClusterId = object.mdbClusterId ?? undefined; + message.onPremise = + object.onPremise !== undefined && object.onPremise !== null + ? OnPremiseClickhouse.fromPartial(object.onPremise) + : undefined; + message.database = object.database ?? ""; + message.user = object.user ?? ""; + message.password = + object.password !== undefined && object.password !== null + ? Secret.fromPartial(object.password) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ClickhouseConnectionOptions.$type, + ClickhouseConnectionOptions +); + +const baseClickhouseConnection: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnection", +}; + +export const ClickhouseConnection = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseConnection" as const, + + encode( + message: ClickhouseConnection, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connectionOptions !== undefined) { + ClickhouseConnectionOptions.encode( + message.connectionOptions, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClickhouseConnection { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClickhouseConnection } as ClickhouseConnection; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connectionOptions = ClickhouseConnectionOptions.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseConnection { + const message = { ...baseClickhouseConnection } as ClickhouseConnection; + message.connectionOptions = + object.connectionOptions !== undefined && + object.connectionOptions !== null + ? ClickhouseConnectionOptions.fromJSON(object.connectionOptions) + : undefined; + return message; + }, + + toJSON(message: ClickhouseConnection): unknown { + const obj: any = {}; + message.connectionOptions !== undefined && + (obj.connectionOptions = message.connectionOptions + ? ClickhouseConnectionOptions.toJSON(message.connectionOptions) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClickhouseConnection { + const message = { ...baseClickhouseConnection } as ClickhouseConnection; + message.connectionOptions = + object.connectionOptions !== undefined && + object.connectionOptions !== null + ? 
ClickhouseConnectionOptions.fromPartial(object.connectionOptions) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ClickhouseConnection.$type, ClickhouseConnection); + +const baseClickhouseSharding: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding", +}; + +export const ClickhouseSharding = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding" as const, + + encode( + message: ClickhouseSharding, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.columnValueHash !== undefined) { + ClickhouseSharding_ColumnValueHash.encode( + message.columnValueHash, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.customMapping !== undefined) { + ClickhouseSharding_ColumnValueMapping.encode( + message.customMapping, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.transferId !== undefined) { + Empty.encode(message.transferId, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClickhouseSharding { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClickhouseSharding } as ClickhouseSharding; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.columnValueHash = ClickhouseSharding_ColumnValueHash.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.customMapping = ClickhouseSharding_ColumnValueMapping.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.transferId = Empty.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseSharding { + const message = { ...baseClickhouseSharding } as ClickhouseSharding; + message.columnValueHash = + object.columnValueHash !== undefined && object.columnValueHash !== null + ? ClickhouseSharding_ColumnValueHash.fromJSON(object.columnValueHash) + : undefined; + message.customMapping = + object.customMapping !== undefined && object.customMapping !== null + ? ClickhouseSharding_ColumnValueMapping.fromJSON(object.customMapping) + : undefined; + message.transferId = + object.transferId !== undefined && object.transferId !== null + ? Empty.fromJSON(object.transferId) + : undefined; + return message; + }, + + toJSON(message: ClickhouseSharding): unknown { + const obj: any = {}; + message.columnValueHash !== undefined && + (obj.columnValueHash = message.columnValueHash + ? ClickhouseSharding_ColumnValueHash.toJSON(message.columnValueHash) + : undefined); + message.customMapping !== undefined && + (obj.customMapping = message.customMapping + ? ClickhouseSharding_ColumnValueMapping.toJSON(message.customMapping) + : undefined); + message.transferId !== undefined && + (obj.transferId = message.transferId + ? Empty.toJSON(message.transferId) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClickhouseSharding { + const message = { ...baseClickhouseSharding } as ClickhouseSharding; + message.columnValueHash = + object.columnValueHash !== undefined && object.columnValueHash !== null + ? ClickhouseSharding_ColumnValueHash.fromPartial(object.columnValueHash) + : undefined; + message.customMapping = + object.customMapping !== undefined && object.customMapping !== null + ? 
ClickhouseSharding_ColumnValueMapping.fromPartial( + object.customMapping + ) + : undefined; + message.transferId = + object.transferId !== undefined && object.transferId !== null + ? Empty.fromPartial(object.transferId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ClickhouseSharding.$type, ClickhouseSharding); + +const baseClickhouseSharding_ColumnValueHash: object = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueHash", + columnName: "", +}; + +export const ClickhouseSharding_ColumnValueHash = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueHash" as const, + + encode( + message: ClickhouseSharding_ColumnValueHash, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.columnName !== "") { + writer.uint32(10).string(message.columnName); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClickhouseSharding_ColumnValueHash { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseClickhouseSharding_ColumnValueHash, + } as ClickhouseSharding_ColumnValueHash; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.columnName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseSharding_ColumnValueHash { + const message = { + ...baseClickhouseSharding_ColumnValueHash, + } as ClickhouseSharding_ColumnValueHash; + message.columnName = + object.columnName !== undefined && object.columnName !== null + ? String(object.columnName) + : ""; + return message; + }, + + toJSON(message: ClickhouseSharding_ColumnValueHash): unknown { + const obj: any = {}; + message.columnName !== undefined && (obj.columnName = message.columnName); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ClickhouseSharding_ColumnValueHash { + const message = { + ...baseClickhouseSharding_ColumnValueHash, + } as ClickhouseSharding_ColumnValueHash; + message.columnName = object.columnName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ClickhouseSharding_ColumnValueHash.$type, + ClickhouseSharding_ColumnValueHash +); + +const baseClickhouseSharding_ColumnValueMapping: object = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueMapping", + columnName: "", +}; + +export const ClickhouseSharding_ColumnValueMapping = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueMapping" as const, + + encode( + message: ClickhouseSharding_ColumnValueMapping, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.columnName !== "") { + writer.uint32(10).string(message.columnName); + } + for (const v of message.mapping) { + ClickhouseSharding_ColumnValueMapping_ValueToShard.encode( + v!, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClickhouseSharding_ColumnValueMapping { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseClickhouseSharding_ColumnValueMapping, + } as ClickhouseSharding_ColumnValueMapping; + message.mapping = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.columnName = reader.string(); + break; + case 2: + message.mapping.push( + ClickhouseSharding_ColumnValueMapping_ValueToShard.decode( + reader, + reader.uint32() + ) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseSharding_ColumnValueMapping { + const message = { + ...baseClickhouseSharding_ColumnValueMapping, + } as ClickhouseSharding_ColumnValueMapping; + message.columnName = + object.columnName !== undefined && object.columnName !== null + ? String(object.columnName) + : ""; + message.mapping = (object.mapping ?? []).map((e: any) => + ClickhouseSharding_ColumnValueMapping_ValueToShard.fromJSON(e) + ); + return message; + }, + + toJSON(message: ClickhouseSharding_ColumnValueMapping): unknown { + const obj: any = {}; + message.columnName !== undefined && (obj.columnName = message.columnName); + if (message.mapping) { + obj.mapping = message.mapping.map((e) => + e + ? ClickhouseSharding_ColumnValueMapping_ValueToShard.toJSON(e) + : undefined + ); + } else { + obj.mapping = []; + } + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ClickhouseSharding_ColumnValueMapping { + const message = { + ...baseClickhouseSharding_ColumnValueMapping, + } as ClickhouseSharding_ColumnValueMapping; + message.columnName = object.columnName ?? ""; + message.mapping = + object.mapping?.map((e) => + ClickhouseSharding_ColumnValueMapping_ValueToShard.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ClickhouseSharding_ColumnValueMapping.$type, + ClickhouseSharding_ColumnValueMapping +); + +const baseClickhouseSharding_ColumnValueMapping_ValueToShard: object = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueMapping.ValueToShard", + shardName: "", +}; + +export const ClickhouseSharding_ColumnValueMapping_ValueToShard = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSharding.ColumnValueMapping.ValueToShard" as const, + + encode( + message: ClickhouseSharding_ColumnValueMapping_ValueToShard, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.columnValue !== undefined) { + ColumnValue.encode( + message.columnValue, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.shardName !== "") { + writer.uint32(18).string(message.shardName); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClickhouseSharding_ColumnValueMapping_ValueToShard { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseClickhouseSharding_ColumnValueMapping_ValueToShard, + } as ClickhouseSharding_ColumnValueMapping_ValueToShard; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.columnValue = ColumnValue.decode(reader, reader.uint32()); + break; + case 2: + message.shardName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseSharding_ColumnValueMapping_ValueToShard { + const message = { + ...baseClickhouseSharding_ColumnValueMapping_ValueToShard, + } as ClickhouseSharding_ColumnValueMapping_ValueToShard; + message.columnValue = + object.columnValue !== undefined && object.columnValue !== null + ? ColumnValue.fromJSON(object.columnValue) + : undefined; + message.shardName = + object.shardName !== undefined && object.shardName !== null + ? String(object.shardName) + : ""; + return message; + }, + + toJSON(message: ClickhouseSharding_ColumnValueMapping_ValueToShard): unknown { + const obj: any = {}; + message.columnValue !== undefined && + (obj.columnValue = message.columnValue + ? ColumnValue.toJSON(message.columnValue) + : undefined); + message.shardName !== undefined && (obj.shardName = message.shardName); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ClickhouseSharding_ColumnValueMapping_ValueToShard { + const message = { + ...baseClickhouseSharding_ColumnValueMapping_ValueToShard, + } as ClickhouseSharding_ColumnValueMapping_ValueToShard; + message.columnValue = + object.columnValue !== undefined && object.columnValue !== null + ? ColumnValue.fromPartial(object.columnValue) + : undefined; + message.shardName = object.shardName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ClickhouseSharding_ColumnValueMapping_ValueToShard.$type, + ClickhouseSharding_ColumnValueMapping_ValueToShard +); + +const baseClickhouseSource: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSource", + subnetId: "", + securityGroups: "", + includeTables: "", + excludeTables: "", +}; + +export const ClickhouseSource = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseSource" as const, + + encode( + message: ClickhouseSource, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connection !== undefined) { + ClickhouseConnection.encode( + message.connection, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.subnetId !== "") { + writer.uint32(74).string(message.subnetId); + } + for (const v of message.securityGroups) { + writer.uint32(82).string(v!); + } + for (const v of message.includeTables) { + writer.uint32(58).string(v!); + } + for (const v of message.excludeTables) { + writer.uint32(66).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClickhouseSource { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseClickhouseSource } as ClickhouseSource; + message.securityGroups = []; + message.includeTables = []; + message.excludeTables = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connection = ClickhouseConnection.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.subnetId = reader.string(); + break; + case 10: + message.securityGroups.push(reader.string()); + break; + case 7: + message.includeTables.push(reader.string()); + break; + case 8: + message.excludeTables.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseSource { + const message = { ...baseClickhouseSource } as ClickhouseSource; + message.connection = + object.connection !== undefined && object.connection !== null + ? ClickhouseConnection.fromJSON(object.connection) + : undefined; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.includeTables = (object.includeTables ?? []).map((e: any) => + String(e) + ); + message.excludeTables = (object.excludeTables ?? []).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: ClickhouseSource): unknown { + const obj: any = {}; + message.connection !== undefined && + (obj.connection = message.connection + ? ClickhouseConnection.toJSON(message.connection) + : undefined); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + if (message.includeTables) { + obj.includeTables = message.includeTables.map((e) => e); + } else { + obj.includeTables = []; + } + if (message.excludeTables) { + obj.excludeTables = message.excludeTables.map((e) => e); + } else { + obj.excludeTables = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ClickhouseSource { + const message = { ...baseClickhouseSource } as ClickhouseSource; + message.connection = + object.connection !== undefined && object.connection !== null + ? ClickhouseConnection.fromPartial(object.connection) + : undefined; + message.subnetId = object.subnetId ?? 
""; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.includeTables = object.includeTables?.map((e) => e) || []; + message.excludeTables = object.excludeTables?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(ClickhouseSource.$type, ClickhouseSource); + +const baseClickhouseTarget: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseTarget", + subnetId: "", + securityGroups: "", + clickhouseClusterName: "", + cleanupPolicy: 0, +}; + +export const ClickhouseTarget = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ClickhouseTarget" as const, + + encode( + message: ClickhouseTarget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connection !== undefined) { + ClickhouseConnection.encode( + message.connection, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.subnetId !== "") { + writer.uint32(98).string(message.subnetId); + } + for (const v of message.securityGroups) { + writer.uint32(410).string(v!); + } + if (message.clickhouseClusterName !== "") { + writer.uint32(402).string(message.clickhouseClusterName); + } + for (const v of message.altNames) { + AltName.encode(v!, writer.uint32(138).fork()).ldelim(); + } + if (message.sharding !== undefined) { + ClickhouseSharding.encode( + message.sharding, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.cleanupPolicy !== 0) { + writer.uint32(168).int32(message.cleanupPolicy); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClickhouseTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseClickhouseTarget } as ClickhouseTarget; + message.securityGroups = []; + message.altNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.connection = ClickhouseConnection.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.subnetId = reader.string(); + break; + case 51: + message.securityGroups.push(reader.string()); + break; + case 50: + message.clickhouseClusterName = reader.string(); + break; + case 17: + message.altNames.push(AltName.decode(reader, reader.uint32())); + break; + case 22: + message.sharding = ClickhouseSharding.decode(reader, reader.uint32()); + break; + case 21: + message.cleanupPolicy = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClickhouseTarget { + const message = { ...baseClickhouseTarget } as ClickhouseTarget; + message.connection = + object.connection !== undefined && object.connection !== null + ? ClickhouseConnection.fromJSON(object.connection) + : undefined; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.clickhouseClusterName = + object.clickhouseClusterName !== undefined && + object.clickhouseClusterName !== null + ? String(object.clickhouseClusterName) + : ""; + message.altNames = (object.altNames ?? []).map((e: any) => + AltName.fromJSON(e) + ); + message.sharding = + object.sharding !== undefined && object.sharding !== null + ? ClickhouseSharding.fromJSON(object.sharding) + : undefined; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? 
clickhouseCleanupPolicyFromJSON(object.cleanupPolicy) + : 0; + return message; + }, + + toJSON(message: ClickhouseTarget): unknown { + const obj: any = {}; + message.connection !== undefined && + (obj.connection = message.connection + ? ClickhouseConnection.toJSON(message.connection) + : undefined); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + message.clickhouseClusterName !== undefined && + (obj.clickhouseClusterName = message.clickhouseClusterName); + if (message.altNames) { + obj.altNames = message.altNames.map((e) => + e ? AltName.toJSON(e) : undefined + ); + } else { + obj.altNames = []; + } + message.sharding !== undefined && + (obj.sharding = message.sharding + ? ClickhouseSharding.toJSON(message.sharding) + : undefined); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = clickhouseCleanupPolicyToJSON( + message.cleanupPolicy + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClickhouseTarget { + const message = { ...baseClickhouseTarget } as ClickhouseTarget; + message.connection = + object.connection !== undefined && object.connection !== null + ? ClickhouseConnection.fromPartial(object.connection) + : undefined; + message.subnetId = object.subnetId ?? ""; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.clickhouseClusterName = object.clickhouseClusterName ?? ""; + message.altNames = + object.altNames?.map((e) => AltName.fromPartial(e)) || []; + message.sharding = + object.sharding !== undefined && object.sharding !== null + ? ClickhouseSharding.fromPartial(object.sharding) + : undefined; + message.cleanupPolicy = object.cleanupPolicy ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ClickhouseTarget.$type, ClickhouseTarget); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts index 595c01ad..a926973e 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts @@ -94,6 +94,14 @@ export function cleanupPolicyToJSON(object: CleanupPolicy): string { } } +export interface AltName { + $type: "yandex.cloud.datatransfer.v1.endpoint.AltName"; + /** From table name */ + fromName: string; + /** To table name */ + toName: string; +} + export interface Secret { $type: "yandex.cloud.datatransfer.v1.endpoint.Secret"; /** Password */ @@ -118,6 +126,84 @@ export interface TLSConfig { caCertificate: string; } +export interface ColumnValue { + $type: "yandex.cloud.datatransfer.v1.endpoint.ColumnValue"; + stringValue: string | undefined; +} + +const baseAltName: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.AltName", + fromName: "", + toName: "", +}; + +export const AltName = { + $type: "yandex.cloud.datatransfer.v1.endpoint.AltName" as const, + + encode( + message: AltName, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.fromName !== "") { + writer.uint32(10).string(message.fromName); + } + if (message.toName !== "") { + writer.uint32(18).string(message.toName); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AltName { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAltName } as AltName; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.fromName = reader.string(); + break; + case 2: + message.toName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AltName { + const message = { ...baseAltName } as AltName; + message.fromName = + object.fromName !== undefined && object.fromName !== null + ? String(object.fromName) + : ""; + message.toName = + object.toName !== undefined && object.toName !== null + ? String(object.toName) + : ""; + return message; + }, + + toJSON(message: AltName): unknown { + const obj: any = {}; + message.fromName !== undefined && (obj.fromName = message.fromName); + message.toName !== undefined && (obj.toName = message.toName); + return obj; + }, + + fromPartial, I>>(object: I): AltName { + const message = { ...baseAltName } as AltName; + message.fromName = object.fromName ?? ""; + message.toName = object.toName ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(AltName.$type, AltName); + const baseSecret: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.Secret", }; @@ -323,6 +409,68 @@ export const TLSConfig = { messageTypeRegistry.set(TLSConfig.$type, TLSConfig); +const baseColumnValue: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ColumnValue", +}; + +export const ColumnValue = { + $type: "yandex.cloud.datatransfer.v1.endpoint.ColumnValue" as const, + + encode( + message: ColumnValue, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.stringValue !== undefined) { + writer.uint32(10).string(message.stringValue); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ColumnValue { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseColumnValue } as ColumnValue; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ColumnValue { + const message = { ...baseColumnValue } as ColumnValue; + message.stringValue = + object.stringValue !== undefined && object.stringValue !== null + ? String(object.stringValue) + : undefined; + return message; + }, + + toJSON(message: ColumnValue): unknown { + const obj: any = {}; + message.stringValue !== undefined && + (obj.stringValue = message.stringValue); + return obj; + }, + + fromPartial, I>>( + object: I + ): ColumnValue { + const message = { ...baseColumnValue } as ColumnValue; + message.stringValue = object.stringValue ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(ColumnValue.$type, ColumnValue); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts new file mode 100644 index 00000000..678ea3e0 --- /dev/null +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mongo.ts @@ -0,0 +1,788 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + TLSMode, + Secret, + CleanupPolicy, + cleanupPolicyFromJSON, + cleanupPolicyToJSON, +} from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; + +export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; + +export interface OnPremiseMongo { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseMongo"; + hosts: string[]; + port: number; + tlsMode?: TLSMode; + replicaSet: string; +} + +export interface MongoConnectionOptions { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoConnectionOptions"; + mdbClusterId: string | undefined; + onPremise?: OnPremiseMongo | undefined; + user: string; + password?: Secret; + authSource: string; +} + +export interface MongoConnection { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoConnection"; + connectionOptions?: MongoConnectionOptions | undefined; +} + +export interface MongoCollection { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoCollection"; + databaseName: string; + collectionName: string; +} + +export interface MongoSource { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoSource"; + connection?: MongoConnection; + subnetId: string; + /** Security groups */ + securityGroups: string[]; + 
collections: MongoCollection[]; + excludedCollections: MongoCollection[]; + secondaryPreferredMode: boolean; +} + +export interface MongoTarget { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoTarget"; + connection?: MongoConnection; + subnetId: string; + /** Security groups */ + securityGroups: string[]; + database: string; + cleanupPolicy: CleanupPolicy; +} + +const baseOnPremiseMongo: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseMongo", + hosts: "", + port: 0, + replicaSet: "", +}; + +export const OnPremiseMongo = { + $type: "yandex.cloud.datatransfer.v1.endpoint.OnPremiseMongo" as const, + + encode( + message: OnPremiseMongo, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.hosts) { + writer.uint32(10).string(v!); + } + if (message.port !== 0) { + writer.uint32(16).int64(message.port); + } + if (message.tlsMode !== undefined) { + TLSMode.encode(message.tlsMode, writer.uint32(50).fork()).ldelim(); + } + if (message.replicaSet !== "") { + writer.uint32(42).string(message.replicaSet); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OnPremiseMongo { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOnPremiseMongo } as OnPremiseMongo; + message.hosts = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hosts.push(reader.string()); + break; + case 2: + message.port = longToNumber(reader.int64() as Long); + break; + case 6: + message.tlsMode = TLSMode.decode(reader, reader.uint32()); + break; + case 5: + message.replicaSet = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OnPremiseMongo { + const message = { ...baseOnPremiseMongo } as OnPremiseMongo; + message.hosts = (object.hosts ?? []).map((e: any) => String(e)); + message.port = + object.port !== undefined && object.port !== null + ? Number(object.port) + : 0; + message.tlsMode = + object.tlsMode !== undefined && object.tlsMode !== null + ? TLSMode.fromJSON(object.tlsMode) + : undefined; + message.replicaSet = + object.replicaSet !== undefined && object.replicaSet !== null + ? String(object.replicaSet) + : ""; + return message; + }, + + toJSON(message: OnPremiseMongo): unknown { + const obj: any = {}; + if (message.hosts) { + obj.hosts = message.hosts.map((e) => e); + } else { + obj.hosts = []; + } + message.port !== undefined && (obj.port = Math.round(message.port)); + message.tlsMode !== undefined && + (obj.tlsMode = message.tlsMode + ? TLSMode.toJSON(message.tlsMode) + : undefined); + message.replicaSet !== undefined && (obj.replicaSet = message.replicaSet); + return obj; + }, + + fromPartial, I>>( + object: I + ): OnPremiseMongo { + const message = { ...baseOnPremiseMongo } as OnPremiseMongo; + message.hosts = object.hosts?.map((e) => e) || []; + message.port = object.port ?? 0; + message.tlsMode = + object.tlsMode !== undefined && object.tlsMode !== null + ? TLSMode.fromPartial(object.tlsMode) + : undefined; + message.replicaSet = object.replicaSet ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(OnPremiseMongo.$type, OnPremiseMongo); + +const baseMongoConnectionOptions: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoConnectionOptions", + user: "", + authSource: "", +}; + +export const MongoConnectionOptions = { + $type: + "yandex.cloud.datatransfer.v1.endpoint.MongoConnectionOptions" as const, + + encode( + message: MongoConnectionOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mdbClusterId !== undefined) { + writer.uint32(10).string(message.mdbClusterId); + } + if (message.onPremise !== undefined) { + OnPremiseMongo.encode( + message.onPremise, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.user !== "") { + writer.uint32(26).string(message.user); + } + if (message.password !== undefined) { + Secret.encode(message.password, writer.uint32(34).fork()).ldelim(); + } + if (message.authSource !== "") { + writer.uint32(42).string(message.authSource); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): MongoConnectionOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongoConnectionOptions } as MongoConnectionOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mdbClusterId = reader.string(); + break; + case 2: + message.onPremise = OnPremiseMongo.decode(reader, reader.uint32()); + break; + case 3: + message.user = reader.string(); + break; + case 4: + message.password = Secret.decode(reader, reader.uint32()); + break; + case 5: + message.authSource = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MongoConnectionOptions { + const message = { ...baseMongoConnectionOptions } as MongoConnectionOptions; + message.mdbClusterId = + object.mdbClusterId !== undefined && object.mdbClusterId !== null + ? String(object.mdbClusterId) + : undefined; + message.onPremise = + object.onPremise !== undefined && object.onPremise !== null + ? OnPremiseMongo.fromJSON(object.onPremise) + : undefined; + message.user = + object.user !== undefined && object.user !== null + ? String(object.user) + : ""; + message.password = + object.password !== undefined && object.password !== null + ? Secret.fromJSON(object.password) + : undefined; + message.authSource = + object.authSource !== undefined && object.authSource !== null + ? String(object.authSource) + : ""; + return message; + }, + + toJSON(message: MongoConnectionOptions): unknown { + const obj: any = {}; + message.mdbClusterId !== undefined && + (obj.mdbClusterId = message.mdbClusterId); + message.onPremise !== undefined && + (obj.onPremise = message.onPremise + ? OnPremiseMongo.toJSON(message.onPremise) + : undefined); + message.user !== undefined && (obj.user = message.user); + message.password !== undefined && + (obj.password = message.password + ? Secret.toJSON(message.password) + : undefined); + message.authSource !== undefined && (obj.authSource = message.authSource); + return obj; + }, + + fromPartial, I>>( + object: I + ): MongoConnectionOptions { + const message = { ...baseMongoConnectionOptions } as MongoConnectionOptions; + message.mdbClusterId = object.mdbClusterId ?? undefined; + message.onPremise = + object.onPremise !== undefined && object.onPremise !== null + ? 
OnPremiseMongo.fromPartial(object.onPremise) + : undefined; + message.user = object.user ?? ""; + message.password = + object.password !== undefined && object.password !== null + ? Secret.fromPartial(object.password) + : undefined; + message.authSource = object.authSource ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MongoConnectionOptions.$type, MongoConnectionOptions); + +const baseMongoConnection: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoConnection", +}; + +export const MongoConnection = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoConnection" as const, + + encode( + message: MongoConnection, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connectionOptions !== undefined) { + MongoConnectionOptions.encode( + message.connectionOptions, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MongoConnection { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongoConnection } as MongoConnection; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.connectionOptions = MongoConnectionOptions.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MongoConnection { + const message = { ...baseMongoConnection } as MongoConnection; + message.connectionOptions = + object.connectionOptions !== undefined && + object.connectionOptions !== null + ? MongoConnectionOptions.fromJSON(object.connectionOptions) + : undefined; + return message; + }, + + toJSON(message: MongoConnection): unknown { + const obj: any = {}; + message.connectionOptions !== undefined && + (obj.connectionOptions = message.connectionOptions + ? MongoConnectionOptions.toJSON(message.connectionOptions) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): MongoConnection { + const message = { ...baseMongoConnection } as MongoConnection; + message.connectionOptions = + object.connectionOptions !== undefined && + object.connectionOptions !== null + ? MongoConnectionOptions.fromPartial(object.connectionOptions) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MongoConnection.$type, MongoConnection); + +const baseMongoCollection: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoCollection", + databaseName: "", + collectionName: "", +}; + +export const MongoCollection = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoCollection" as const, + + encode( + message: MongoCollection, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.databaseName !== "") { + writer.uint32(10).string(message.databaseName); + } + if (message.collectionName !== "") { + writer.uint32(18).string(message.collectionName); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MongoCollection { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongoCollection } as MongoCollection; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.databaseName = reader.string(); + break; + case 2: + message.collectionName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MongoCollection { + const message = { ...baseMongoCollection } as MongoCollection; + message.databaseName = + object.databaseName !== undefined && object.databaseName !== null + ? String(object.databaseName) + : ""; + message.collectionName = + object.collectionName !== undefined && object.collectionName !== null + ? String(object.collectionName) + : ""; + return message; + }, + + toJSON(message: MongoCollection): unknown { + const obj: any = {}; + message.databaseName !== undefined && + (obj.databaseName = message.databaseName); + message.collectionName !== undefined && + (obj.collectionName = message.collectionName); + return obj; + }, + + fromPartial, I>>( + object: I + ): MongoCollection { + const message = { ...baseMongoCollection } as MongoCollection; + message.databaseName = object.databaseName ?? ""; + message.collectionName = object.collectionName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MongoCollection.$type, MongoCollection); + +const baseMongoSource: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoSource", + subnetId: "", + securityGroups: "", + secondaryPreferredMode: false, +}; + +export const MongoSource = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoSource" as const, + + encode( + message: MongoSource, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connection !== undefined) { + MongoConnection.encode( + message.connection, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.subnetId !== "") { + writer.uint32(18).string(message.subnetId); + } + for (const v of message.securityGroups) { + writer.uint32(90).string(v!); + } + for (const v of message.collections) { + MongoCollection.encode(v!, writer.uint32(50).fork()).ldelim(); + } + for (const v of message.excludedCollections) { + MongoCollection.encode(v!, writer.uint32(58).fork()).ldelim(); + } + if (message.secondaryPreferredMode === true) { + writer.uint32(64).bool(message.secondaryPreferredMode); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MongoSource { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongoSource } as MongoSource; + message.securityGroups = []; + message.collections = []; + message.excludedCollections = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connection = MongoConnection.decode(reader, reader.uint32()); + break; + case 2: + message.subnetId = reader.string(); + break; + case 11: + message.securityGroups.push(reader.string()); + break; + case 6: + message.collections.push( + MongoCollection.decode(reader, reader.uint32()) + ); + break; + case 7: + message.excludedCollections.push( + MongoCollection.decode(reader, reader.uint32()) + ); + break; + case 8: + message.secondaryPreferredMode = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MongoSource { + const message = { ...baseMongoSource } as MongoSource; + message.connection = + object.connection !== undefined && object.connection !== null + ? MongoConnection.fromJSON(object.connection) + : undefined; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.collections = (object.collections ?? []).map((e: any) => + MongoCollection.fromJSON(e) + ); + message.excludedCollections = (object.excludedCollections ?? []).map( + (e: any) => MongoCollection.fromJSON(e) + ); + message.secondaryPreferredMode = + object.secondaryPreferredMode !== undefined && + object.secondaryPreferredMode !== null + ? Boolean(object.secondaryPreferredMode) + : false; + return message; + }, + + toJSON(message: MongoSource): unknown { + const obj: any = {}; + message.connection !== undefined && + (obj.connection = message.connection + ? MongoConnection.toJSON(message.connection) + : undefined); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + if (message.collections) { + obj.collections = message.collections.map((e) => + e ? MongoCollection.toJSON(e) : undefined + ); + } else { + obj.collections = []; + } + if (message.excludedCollections) { + obj.excludedCollections = message.excludedCollections.map((e) => + e ? MongoCollection.toJSON(e) : undefined + ); + } else { + obj.excludedCollections = []; + } + message.secondaryPreferredMode !== undefined && + (obj.secondaryPreferredMode = message.secondaryPreferredMode); + return obj; + }, + + fromPartial, I>>( + object: I + ): MongoSource { + const message = { ...baseMongoSource } as MongoSource; + message.connection = + object.connection !== undefined && object.connection !== null + ? MongoConnection.fromPartial(object.connection) + : undefined; + message.subnetId = object.subnetId ?? ""; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.collections = + object.collections?.map((e) => MongoCollection.fromPartial(e)) || []; + message.excludedCollections = + object.excludedCollections?.map((e) => MongoCollection.fromPartial(e)) || + []; + message.secondaryPreferredMode = object.secondaryPreferredMode ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(MongoSource.$type, MongoSource); + +const baseMongoTarget: object = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoTarget", + subnetId: "", + securityGroups: "", + database: "", + cleanupPolicy: 0, +}; + +export const MongoTarget = { + $type: "yandex.cloud.datatransfer.v1.endpoint.MongoTarget" as const, + + encode( + message: MongoTarget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connection !== undefined) { + MongoConnection.encode( + message.connection, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.subnetId !== "") { + writer.uint32(58).string(message.subnetId); + } + for (const v of message.securityGroups) { + writer.uint32(66).string(v!); + } + if (message.database !== "") { + writer.uint32(18).string(message.database); + } + if (message.cleanupPolicy !== 0) { + writer.uint32(48).int32(message.cleanupPolicy); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MongoTarget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongoTarget } as MongoTarget; + message.securityGroups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connection = MongoConnection.decode(reader, reader.uint32()); + break; + case 7: + message.subnetId = reader.string(); + break; + case 8: + message.securityGroups.push(reader.string()); + break; + case 2: + message.database = reader.string(); + break; + case 6: + message.cleanupPolicy = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MongoTarget { + const message = { ...baseMongoTarget } as MongoTarget; + message.connection = + object.connection !== undefined && object.connection !== null + ? MongoConnection.fromJSON(object.connection) + : undefined; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); + message.database = + object.database !== undefined && object.database !== null + ? String(object.database) + : ""; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? cleanupPolicyFromJSON(object.cleanupPolicy) + : 0; + return message; + }, + + toJSON(message: MongoTarget): unknown { + const obj: any = {}; + message.connection !== undefined && + (obj.connection = message.connection + ? MongoConnection.toJSON(message.connection) + : undefined); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } + message.database !== undefined && (obj.database = message.database); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = cleanupPolicyToJSON(message.cleanupPolicy)); + return obj; + }, + + fromPartial, I>>( + object: I + ): MongoTarget { + const message = { ...baseMongoTarget } as MongoTarget; + message.connection = + object.connection !== undefined && object.connection !== null + ? MongoConnection.fromPartial(object.connection) + : undefined; + message.subnetId = object.subnetId ?? ""; + message.securityGroups = object.securityGroups?.map((e) => e) || []; + message.database = object.database ?? 
""; + message.cleanupPolicy = object.cleanupPolicy ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(MongoTarget.$type, MongoTarget); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts index 3fa97f38..204d3fb1 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts @@ -43,7 +43,7 @@ export interface MysqlConnection { /** * Managed cluster * - * Yandex Managed Service for MySQL cluster ID + * Yandex.Cloud Managed MySQL cluster ID */ mdbClusterId: string | undefined; /** @@ -84,6 +84,8 @@ export interface MysqlSource { * Database connection settings */ connection?: MysqlConnection; + /** Security groups */ + securityGroups: string[]; /** * Database name * @@ -91,6 +93,13 @@ export interface MysqlSource { * databases at the same time from this source. */ database: string; + /** + * Database for service tables + * + * Default: data source database. Here created technical tables (__tm_keeper, + * __tm_gtid_keeper). + */ + serviceDatabase: string; /** * Username * @@ -128,6 +137,8 @@ export interface MysqlTarget { * Database connection settings */ connection?: MysqlConnection; + /** Security groups */ + securityGroups: string[]; /** * Database name * @@ -481,7 +492,9 @@ messageTypeRegistry.set( const baseMysqlSource: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.MysqlSource", + securityGroups: "", database: "", + serviceDatabase: "", user: "", includeTablesRegex: "", excludeTablesRegex: "", @@ -501,9 +514,15 @@ export const MysqlSource = { writer.uint32(10).fork() ).ldelim(); } + for (const v of message.securityGroups) { + writer.uint32(114).string(v!); + } if (message.database !== "") { writer.uint32(18).string(message.database); } + if (message.serviceDatabase !== "") { + writer.uint32(122).string(message.serviceDatabase); + } if (message.user !== "") { writer.uint32(26).string(message.user); } @@ -532,6 +551,7 @@ export const MysqlSource = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseMysqlSource } as MysqlSource; + message.securityGroups = []; message.includeTablesRegex = []; message.excludeTablesRegex = []; while (reader.pos < end) { @@ -540,9 +560,15 @@ export const MysqlSource = { case 1: message.connection = MysqlConnection.decode(reader, reader.uint32()); break; + case 14: + message.securityGroups.push(reader.string()); + break; case 2: message.database = reader.string(); break; + case 15: + message.serviceDatabase = reader.string(); + break; case 3: message.user = reader.string(); break; @@ -578,10 +604,17 @@ export const MysqlSource = { object.connection !== undefined && object.connection !== null ? MysqlConnection.fromJSON(object.connection) : undefined; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); message.database = object.database !== undefined && object.database !== null ? String(object.database) : ""; + message.serviceDatabase = + object.serviceDatabase !== undefined && object.serviceDatabase !== null + ? String(object.serviceDatabase) + : ""; message.user = object.user !== undefined && object.user !== null ? String(object.user) @@ -614,7 +647,14 @@ export const MysqlSource = { (obj.connection = message.connection ? MysqlConnection.toJSON(message.connection) : undefined); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } message.database !== undefined && (obj.database = message.database); + message.serviceDatabase !== undefined && + (obj.serviceDatabase = message.serviceDatabase); message.user !== undefined && (obj.user = message.user); message.password !== undefined && (obj.password = message.password @@ -646,7 +686,9 @@ export const MysqlSource = { object.connection !== undefined && object.connection !== null ? MysqlConnection.fromPartial(object.connection) : undefined; + message.securityGroups = object.securityGroups?.map((e) => e) || []; message.database = object.database ?? ""; + message.serviceDatabase = object.serviceDatabase ?? ""; message.user = object.user ?? ""; message.password = object.password !== undefined && object.password !== null @@ -668,6 +710,7 @@ messageTypeRegistry.set(MysqlSource.$type, MysqlSource); const baseMysqlTarget: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.MysqlTarget", + securityGroups: "", database: "", user: "", sqlMode: "", @@ -690,6 +733,9 @@ export const MysqlTarget = { writer.uint32(10).fork() ).ldelim(); } + for (const v of message.securityGroups) { + writer.uint32(130).string(v!); + } if (message.database !== "") { writer.uint32(18).string(message.database); } @@ -721,12 +767,16 @@ export const MysqlTarget = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseMysqlTarget } as MysqlTarget; + message.securityGroups = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: message.connection = MysqlConnection.decode(reader, reader.uint32()); break; + case 16: + message.securityGroups.push(reader.string()); + break; case 2: message.database = reader.string(); break; @@ -765,6 +815,9 @@ export const MysqlTarget = { object.connection !== undefined && object.connection !== null ? MysqlConnection.fromJSON(object.connection) : undefined; + message.securityGroups = (object.securityGroups ?? 
[]).map((e: any) => + String(e) + ); message.database = object.database !== undefined && object.database !== null ? String(object.database) @@ -807,6 +860,11 @@ export const MysqlTarget = { (obj.connection = message.connection ? MysqlConnection.toJSON(message.connection) : undefined); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } message.database !== undefined && (obj.database = message.database); message.user !== undefined && (obj.user = message.user); message.password !== undefined && @@ -832,6 +890,7 @@ export const MysqlTarget = { object.connection !== undefined && object.connection !== null ? MysqlConnection.fromPartial(object.connection) : undefined; + message.securityGroups = object.securityGroups?.map((e) => e) || []; message.database = object.database ?? ""; message.user = object.user ?? ""; message.password = diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts index beec5fae..4736b8a5 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts @@ -113,6 +113,12 @@ export interface PostgresObjectTransferSettings { * CREATE CAST ... */ cast: ObjectTransferStage; + /** + * Materialized views + * + * CREATE MATERIALIZED VIEW ... + */ + materializedView: ObjectTransferStage; } export interface OnPremisePostgres { @@ -143,7 +149,7 @@ export interface PostgresConnection { /** * Managed cluster * - * Yandex Managed Service for PostgreSQL cluster ID + * Yandex.Cloud Managed PostgreSQL cluster ID */ mdbClusterId: string | undefined; /** @@ -162,6 +168,8 @@ export interface PostgresSource { * Database connection settings */ connection?: PostgresConnection; + /** Security groups */ + securityGroups: string[]; /** Database name */ database: string; /** @@ -199,7 +207,7 @@ export interface PostgresSource { */ slotByteLagLimit: number; /** - * Database schema for service table + * Database schema for service tables * * Default: public. Here created technical tables (__consumer_keeper, * __data_transfer_mole_finder). @@ -221,6 +229,8 @@ export interface PostgresTarget { * Database connection settings */ connection?: PostgresConnection; + /** Security groups */ + securityGroups: string[]; /** Database name */ database: string; /** @@ -262,6 +272,7 @@ const basePostgresObjectTransferSettings: object = { collation: 0, policy: 0, cast: 0, + materializedView: 0, }; export const PostgresObjectTransferSettings = { @@ -320,6 +331,9 @@ export const PostgresObjectTransferSettings = { if (message.cast !== 0) { writer.uint32(128).int32(message.cast); } + if (message.materializedView !== 0) { + writer.uint32(136).int32(message.materializedView); + } return writer; }, @@ -383,6 +397,9 @@ export const PostgresObjectTransferSettings = { case 16: message.cast = reader.int32() as any; break; + case 17: + message.materializedView = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -459,6 +476,10 @@ export const PostgresObjectTransferSettings = { object.cast !== undefined && object.cast !== null ? objectTransferStageFromJSON(object.cast) : 0; + message.materializedView = + object.materializedView !== undefined && object.materializedView !== null + ? 
objectTransferStageFromJSON(object.materializedView) + : 0; return message; }, @@ -498,6 +519,10 @@ export const PostgresObjectTransferSettings = { (obj.policy = objectTransferStageToJSON(message.policy)); message.cast !== undefined && (obj.cast = objectTransferStageToJSON(message.cast)); + message.materializedView !== undefined && + (obj.materializedView = objectTransferStageToJSON( + message.materializedView + )); return obj; }, @@ -523,6 +548,7 @@ export const PostgresObjectTransferSettings = { message.collation = object.collation ?? 0; message.policy = object.policy ?? 0; message.cast = object.cast ?? 0; + message.materializedView = object.materializedView ?? 0; return message; }, }; @@ -725,6 +751,7 @@ messageTypeRegistry.set(PostgresConnection.$type, PostgresConnection); const basePostgresSource: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresSource", + securityGroups: "", database: "", user: "", includeTables: "", @@ -746,6 +773,9 @@ export const PostgresSource = { writer.uint32(10).fork() ).ldelim(); } + for (const v of message.securityGroups) { + writer.uint32(114).string(v!); + } if (message.database !== "") { writer.uint32(18).string(message.database); } @@ -780,6 +810,7 @@ export const PostgresSource = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...basePostgresSource } as PostgresSource; + message.securityGroups = []; message.includeTables = []; message.excludeTables = []; while (reader.pos < end) { @@ -791,6 +822,9 @@ export const PostgresSource = { reader.uint32() ); break; + case 14: + message.securityGroups.push(reader.string()); + break; case 2: message.database = reader.string(); break; @@ -830,6 +864,9 @@ export const PostgresSource = { object.connection !== undefined && object.connection !== null ? PostgresConnection.fromJSON(object.connection) : undefined; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); message.database = object.database !== undefined && object.database !== null ? String(object.database) @@ -870,6 +907,11 @@ export const PostgresSource = { (obj.connection = message.connection ? PostgresConnection.toJSON(message.connection) : undefined); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } message.database !== undefined && (obj.database = message.database); message.user !== undefined && (obj.user = message.user); message.password !== undefined && @@ -905,6 +947,7 @@ export const PostgresSource = { object.connection !== undefined && object.connection !== null ? PostgresConnection.fromPartial(object.connection) : undefined; + message.securityGroups = object.securityGroups?.map((e) => e) || []; message.database = object.database ?? ""; message.user = object.user ?? ""; message.password = @@ -930,6 +973,7 @@ messageTypeRegistry.set(PostgresSource.$type, PostgresSource); const basePostgresTarget: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresTarget", + securityGroups: "", database: "", user: "", cleanupPolicy: 0, @@ -948,6 +992,9 @@ export const PostgresTarget = { writer.uint32(10).fork() ).ldelim(); } + for (const v of message.securityGroups) { + writer.uint32(58).string(v!); + } if (message.database !== "") { writer.uint32(18).string(message.database); } @@ -967,6 +1014,7 @@ export const PostgresTarget = { const reader = input instanceof _m0.Reader ? 
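// Illustrative usage sketch (not part of the generated file): PostgresSource and
// PostgresTarget now carry `securityGroups`, and PostgresObjectTransferSettings
// gained a `materializedView` transfer stage. The import path and all IDs below
// are assumptions based on the file path shown in this diff.
import { PostgresSource } from "./src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres";

const postgresSource = PostgresSource.fromPartial({
  connection: { mdbClusterId: "my-postgres-cluster-id" }, // hypothetical managed cluster ID
  database: "billing",
  user: "transfer",
  securityGroups: ["my-security-group-id"], // hypothetical security group ID
});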
input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...basePostgresTarget } as PostgresTarget; + message.securityGroups = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -976,6 +1024,9 @@ export const PostgresTarget = { reader.uint32() ); break; + case 7: + message.securityGroups.push(reader.string()); + break; case 2: message.database = reader.string(); break; @@ -1002,6 +1053,9 @@ export const PostgresTarget = { object.connection !== undefined && object.connection !== null ? PostgresConnection.fromJSON(object.connection) : undefined; + message.securityGroups = (object.securityGroups ?? []).map((e: any) => + String(e) + ); message.database = object.database !== undefined && object.database !== null ? String(object.database) @@ -1027,6 +1081,11 @@ export const PostgresTarget = { (obj.connection = message.connection ? PostgresConnection.toJSON(message.connection) : undefined); + if (message.securityGroups) { + obj.securityGroups = message.securityGroups.map((e) => e); + } else { + obj.securityGroups = []; + } message.database !== undefined && (obj.database = message.database); message.user !== undefined && (obj.user = message.user); message.password !== undefined && @@ -1046,6 +1105,7 @@ export const PostgresTarget = { object.connection !== undefined && object.connection !== null ? PostgresConnection.fromPartial(object.connection) : undefined; + message.securityGroups = object.securityGroups?.map((e) => e) || []; message.database = object.database ?? ""; message.user = object.user ?? ""; message.password = diff --git a/src/generated/yandex/cloud/index.ts b/src/generated/yandex/cloud/index.ts index f6ea8c29..3f8c96cb 100644 --- a/src/generated/yandex/cloud/index.ts +++ b/src/generated/yandex/cloud/index.ts @@ -28,5 +28,6 @@ export * as quota from './quota/' export * as reference from './reference/' export * as resourcemanager from './resourcemanager/' export * as serverless from './serverless/' +export * as storage from './storage/' export * as vpc from './vpc/' export * as ydb from './ydb/' \ No newline at end of file diff --git a/src/generated/yandex/cloud/logging/v1/log_reading_service.ts b/src/generated/yandex/cloud/logging/v1/log_reading_service.ts index 8482e6f4..5d8450a9 100644 --- a/src/generated/yandex/cloud/logging/v1/log_reading_service.ts +++ b/src/generated/yandex/cloud/logging/v1/log_reading_service.ts @@ -100,6 +100,12 @@ export interface Criteria { filter: string; /** The maximum number of results per page to return. */ pageSize: number; + /** + * Limits response to maximum size in bytes. Prevents gRPC resource exhaustion. + * + * Default value for max response size is 3.5 MiB + */ + maxResponseSize: number; } const baseReadRequest: object = { @@ -298,6 +304,7 @@ const baseCriteria: object = { levels: 0, filter: "", pageSize: 0, + maxResponseSize: 0, }; export const Criteria = { @@ -339,6 +346,9 @@ export const Criteria = { if (message.pageSize !== 0) { writer.uint32(64).int64(message.pageSize); } + if (message.maxResponseSize !== 0) { + writer.uint32(72).int64(message.maxResponseSize); + } return writer; }, @@ -387,6 +397,9 @@ export const Criteria = { case 8: message.pageSize = longToNumber(reader.int64() as Long); break; + case 9: + message.maxResponseSize = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -424,6 +437,10 @@ export const Criteria = { object.pageSize !== undefined && object.pageSize !== null ? 
Number(object.pageSize) : 0; + message.maxResponseSize = + object.maxResponseSize !== undefined && object.maxResponseSize !== null + ? Number(object.maxResponseSize) + : 0; return message; }, @@ -450,6 +467,8 @@ export const Criteria = { message.filter !== undefined && (obj.filter = message.filter); message.pageSize !== undefined && (obj.pageSize = Math.round(message.pageSize)); + message.maxResponseSize !== undefined && + (obj.maxResponseSize = Math.round(message.maxResponseSize)); return obj; }, @@ -463,6 +482,7 @@ export const Criteria = { message.levels = object.levels?.map((e) => e) || []; message.filter = object.filter ?? ""; message.pageSize = object.pageSize ?? 0; + message.maxResponseSize = object.maxResponseSize ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts index 23cb86f1..428b3da7 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts @@ -7,10 +7,12 @@ import { ChannelOptions, UntypedServiceImplementation, handleUnaryCall, + handleServerStreamingCall, Client, ClientUnaryCall, Metadata, CallOptions, + ClientReadableStream, ServiceError, } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; @@ -460,6 +462,98 @@ export interface ListClusterBackupsRequest { pageToken: string; } +export interface StreamLogRecord { + $type: "yandex.cloud.mdb.greenplum.v1.StreamLogRecord"; + /** One of the requested log records. */ + record?: LogRecord; + /** + * This token allows you to continue streaming logs starting from the exact + * same record. To continue streaming, specify value of `next_record_token` + * as value for `record_token` parameter in the next StreamLogs request. + * This value is interchangeable with `next_page_token` from ListLogs method. + */ + nextRecordToken: string; +} + +export interface StreamClusterLogsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.StreamClusterLogsRequest"; + /** Required. ID of the Greenplum cluster. */ + clusterId: string; + /** Columns from logs table to get in the response. */ + columnFilter: string[]; + serviceType: StreamClusterLogsRequest_ServiceType; + /** Start timestamp for the logs request. */ + fromTime?: Date; + /** + * End timestamp for the logs request. + * If this field is not set, all existing logs will be sent and then the new ones as + * they appear. In essence it has 'tail -f' semantics. + */ + toTime?: Date; + /** + * Record token. Set `record_token` to the `next_record_token` returned by a previous StreamLogs + * request to start streaming from next log record. + */ + recordToken: string; + /** + * A filter expression that filters resources listed in the response. + * The expression must specify: + * 1. The field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], + * [LogRecord.logs.message.error_severity] (for GREENPLUM service), [LogRecord.logs.message.level] (for POOLER service) fields. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`. + * Examples of a filter: + * `message.hostname='node1.db.cloud.yandex.net'` + * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"` + */ + filter: string; +} + +export enum StreamClusterLogsRequest_ServiceType { + /** SERVICE_TYPE_UNSPECIFIED - Type is not specified. 
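// Illustrative usage sketch (not part of the generated file): log reading
// `Criteria` gained `maxResponseSize`, which caps the reply payload (the comment
// above states a default of 3.5 MiB). The import path and the filter value are
// assumptions.
import { Criteria } from "./src/generated/yandex/cloud/logging/v1/log_reading_service";

const criteria = Criteria.fromPartial({
  filter: 'resource_type = "serverless.function"', // hypothetical filter expression
  pageSize: 100,
  maxResponseSize: 1024 * 1024, // cap the response at roughly 1 MiB instead of the default
});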
*/ + SERVICE_TYPE_UNSPECIFIED = 0, + /** GREENPLUM - Greenplum® activity logs. */ + GREENPLUM = 1, + /** GREENPLUM_POOLER - Greenplum® pooler logs. */ + GREENPLUM_POOLER = 2, + UNRECOGNIZED = -1, +} + +export function streamClusterLogsRequest_ServiceTypeFromJSON( + object: any +): StreamClusterLogsRequest_ServiceType { + switch (object) { + case 0: + case "SERVICE_TYPE_UNSPECIFIED": + return StreamClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED; + case 1: + case "GREENPLUM": + return StreamClusterLogsRequest_ServiceType.GREENPLUM; + case 2: + case "GREENPLUM_POOLER": + return StreamClusterLogsRequest_ServiceType.GREENPLUM_POOLER; + case -1: + case "UNRECOGNIZED": + default: + return StreamClusterLogsRequest_ServiceType.UNRECOGNIZED; + } +} + +export function streamClusterLogsRequest_ServiceTypeToJSON( + object: StreamClusterLogsRequest_ServiceType +): string { + switch (object) { + case StreamClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED: + return "SERVICE_TYPE_UNSPECIFIED"; + case StreamClusterLogsRequest_ServiceType.GREENPLUM: + return "GREENPLUM"; + case StreamClusterLogsRequest_ServiceType.GREENPLUM_POOLER: + return "GREENPLUM_POOLER"; + default: + return "UNKNOWN"; + } +} + export interface ListClusterBackupsResponse { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse"; /** List of Greenplum® backups. */ @@ -3339,6 +3433,256 @@ messageTypeRegistry.set( ListClusterBackupsRequest ); +const baseStreamLogRecord: object = { + $type: "yandex.cloud.mdb.greenplum.v1.StreamLogRecord", + nextRecordToken: "", +}; + +export const StreamLogRecord = { + $type: "yandex.cloud.mdb.greenplum.v1.StreamLogRecord" as const, + + encode( + message: StreamLogRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.record !== undefined) { + LogRecord.encode(message.record, writer.uint32(10).fork()).ldelim(); + } + if (message.nextRecordToken !== "") { + writer.uint32(18).string(message.nextRecordToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamLogRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamLogRecord } as StreamLogRecord; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.record = LogRecord.decode(reader, reader.uint32()); + break; + case 2: + message.nextRecordToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamLogRecord { + const message = { ...baseStreamLogRecord } as StreamLogRecord; + message.record = + object.record !== undefined && object.record !== null + ? LogRecord.fromJSON(object.record) + : undefined; + message.nextRecordToken = + object.nextRecordToken !== undefined && object.nextRecordToken !== null + ? String(object.nextRecordToken) + : ""; + return message; + }, + + toJSON(message: StreamLogRecord): unknown { + const obj: any = {}; + message.record !== undefined && + (obj.record = message.record + ? LogRecord.toJSON(message.record) + : undefined); + message.nextRecordToken !== undefined && + (obj.nextRecordToken = message.nextRecordToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamLogRecord { + const message = { ...baseStreamLogRecord } as StreamLogRecord; + message.record = + object.record !== undefined && object.record !== null + ? 
LogRecord.fromPartial(object.record) + : undefined; + message.nextRecordToken = object.nextRecordToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(StreamLogRecord.$type, StreamLogRecord); + +const baseStreamClusterLogsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.StreamClusterLogsRequest", + clusterId: "", + columnFilter: "", + serviceType: 0, + recordToken: "", + filter: "", +}; + +export const StreamClusterLogsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.StreamClusterLogsRequest" as const, + + encode( + message: StreamClusterLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.columnFilter) { + writer.uint32(18).string(v!); + } + if (message.serviceType !== 0) { + writer.uint32(24).int32(message.serviceType); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.recordToken !== "") { + writer.uint32(50).string(message.recordToken); + } + if (message.filter !== "") { + writer.uint32(58).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StreamClusterLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseStreamClusterLogsRequest, + } as StreamClusterLogsRequest; + message.columnFilter = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.columnFilter.push(reader.string()); + break; + case 3: + message.serviceType = reader.int32() as any; + break; + case 4: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.recordToken = reader.string(); + break; + case 7: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamClusterLogsRequest { + const message = { + ...baseStreamClusterLogsRequest, + } as StreamClusterLogsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.columnFilter = (object.columnFilter ?? []).map((e: any) => + String(e) + ); + message.serviceType = + object.serviceType !== undefined && object.serviceType !== null + ? streamClusterLogsRequest_ServiceTypeFromJSON(object.serviceType) + : 0; + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.recordToken = + object.recordToken !== undefined && object.recordToken !== null + ? String(object.recordToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? 
String(object.filter) + : ""; + return message; + }, + + toJSON(message: StreamClusterLogsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.columnFilter) { + obj.columnFilter = message.columnFilter.map((e) => e); + } else { + obj.columnFilter = []; + } + message.serviceType !== undefined && + (obj.serviceType = streamClusterLogsRequest_ServiceTypeToJSON( + message.serviceType + )); + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.recordToken !== undefined && + (obj.recordToken = message.recordToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamClusterLogsRequest { + const message = { + ...baseStreamClusterLogsRequest, + } as StreamClusterLogsRequest; + message.clusterId = object.clusterId ?? ""; + message.columnFilter = object.columnFilter?.map((e) => e) || []; + message.serviceType = object.serviceType ?? 0; + message.fromTime = object.fromTime ?? undefined; + message.toTime = object.toTime ?? undefined; + message.recordToken = object.recordToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + StreamClusterLogsRequest.$type, + StreamClusterLogsRequest +); + const baseListClusterBackupsResponse: object = { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse", nextPageToken: "", @@ -4069,6 +4413,19 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterLogsResponse.decode(value), }, + /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + streamLogs: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/StreamLogs", + requestStream: false, + responseStream: true, + requestSerialize: (value: StreamClusterLogsRequest) => + Buffer.from(StreamClusterLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + StreamClusterLogsRequest.decode(value), + responseSerialize: (value: StreamLogRecord) => + Buffer.from(StreamLogRecord.encode(value).finish()), + responseDeserialize: (value: Buffer) => StreamLogRecord.decode(value), + }, /** Retrieves the list of available backups for the specified Greenplum cluster. */ listBackups: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/ListBackups", @@ -4133,6 +4490,11 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { >; /** Retrieves logs for the specified Greenplum® cluster. */ listLogs: handleUnaryCall; + /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + streamLogs: handleServerStreamingCall< + StreamClusterLogsRequest, + StreamLogRecord + >; /** Retrieves the list of available backups for the specified Greenplum cluster. */ listBackups: handleUnaryCall< ListClusterBackupsRequest, @@ -4368,6 +4730,16 @@ export interface ClusterServiceClient extends Client { response: ListClusterLogsResponse ) => void ): ClientUnaryCall; + /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + streamLogs( + request: StreamClusterLogsRequest, + options?: Partial + ): ClientReadableStream; + streamLogs( + request: StreamClusterLogsRequest, + metadata?: Metadata, + options?: Partial + ): ClientReadableStream; /** Retrieves the list of available backups for the specified Greenplum cluster. 
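// Illustrative usage sketch (not part of the generated file): the Greenplum
// ClusterService gained a server-streaming StreamLogs method with 'tail -f'
// semantics. This assumes `client` is a ClusterServiceClient instance obtained
// elsewhere (client construction is not shown in this hunk); IDs are placeholders.
import {
  ClusterServiceClient,
  StreamClusterLogsRequest,
  StreamClusterLogsRequest_ServiceType,
  StreamLogRecord,
} from "./src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service";

declare const client: ClusterServiceClient;

const logStream = client.streamLogs(
  StreamClusterLogsRequest.fromPartial({
    clusterId: "my-greenplum-cluster-id", // hypothetical cluster ID
    serviceType: StreamClusterLogsRequest_ServiceType.GREENPLUM,
    columnFilter: ["message"],
  })
);
logStream.on("data", (rec: StreamLogRecord) => {
  console.log(rec.nextRecordToken, rec.record);
});
logStream.on("error", (err) => console.error(err));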
*/ listBackups( request: ListClusterBackupsRequest, diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts index 73a796eb..64625e53 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts @@ -56,6 +56,8 @@ export interface Cluster { securityGroupIds: string[]; /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export enum Cluster_Environment { @@ -533,6 +535,7 @@ const baseCluster: object = { status: 0, securityGroupIds: "", deletionProtection: false, + hostGroupIds: "", }; export const Cluster = { @@ -606,6 +609,9 @@ export const Cluster = { if (message.deletionProtection === true) { writer.uint32(128).bool(message.deletionProtection); } + for (const v of message.hostGroupIds) { + writer.uint32(138).string(v!); + } return writer; }, @@ -616,6 +622,7 @@ export const Cluster = { message.labels = {}; message.monitoring = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -678,6 +685,9 @@ export const Cluster = { case 16: message.deletionProtection = reader.bool(); break; + case 17: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -752,6 +762,9 @@ export const Cluster = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -803,6 +816,11 @@ export const Cluster = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -842,6 +860,7 @@ export const Cluster = { : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts index d99fd4fd..5fd8e0db 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts @@ -124,6 +124,8 @@ export interface CreateClusterRequest { securityGroupIds: string[]; /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export interface CreateClusterRequest_LabelsEntry { @@ -241,6 +243,10 @@ export interface RestoreClusterRequest { folderId: string; /** List of security group IDs to apply to the new cluster. */ securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. 
*/ + hostGroupIds: string[]; } export interface RestoreClusterRequest_LabelsEntry { @@ -1172,6 +1178,7 @@ const baseCreateClusterRequest: object = { networkId: "", securityGroupIds: "", deletionProtection: false, + hostGroupIds: "", }; export const CreateClusterRequest = { @@ -1224,6 +1231,9 @@ export const CreateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(96).bool(message.deletionProtection); } + for (const v of message.hostGroupIds) { + writer.uint32(106).string(v!); + } return writer; }, @@ -1239,6 +1249,7 @@ export const CreateClusterRequest = { message.userSpecs = []; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1286,6 +1297,9 @@ export const CreateClusterRequest = { case 12: message.deletionProtection = reader.bool(); break; + case 13: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -1343,6 +1357,9 @@ export const CreateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -1393,6 +1410,11 @@ export const CreateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -1425,6 +1447,7 @@ export const CreateClusterRequest = { message.networkId = object.networkId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; @@ -2226,6 +2249,8 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + deletionProtection: false, + hostGroupIds: "", }; export const RestoreClusterRequest = { @@ -2278,6 +2303,12 @@ export const RestoreClusterRequest = { for (const v of message.securityGroupIds) { writer.uint32(98).string(v!); } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } + for (const v of message.hostGroupIds) { + writer.uint32(114).string(v!); + } return writer; }, @@ -2291,6 +2322,7 @@ export const RestoreClusterRequest = { message.labels = {}; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2335,6 +2367,12 @@ export const RestoreClusterRequest = { case 12: message.securityGroupIds.push(reader.string()); break; + case 13: + message.deletionProtection = reader.bool(); + break; + case 14: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -2389,6 +2427,14 @@ export const RestoreClusterRequest = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.hostGroupIds = (object.hostGroupIds ?? 
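// Illustrative usage sketch (not part of the generated file): the Managed MySQL
// create/restore requests now accept `hostGroupIds`, and RestoreClusterRequest
// also accepts `deletionProtection`. Only fields visible in this diff are set;
// the backup reference and other required restore parameters are omitted, and
// all IDs are hypothetical placeholders.
import { RestoreClusterRequest } from "./src/generated/yandex/cloud/mdb/mysql/v1/cluster_service";

const restoreRequest = RestoreClusterRequest.fromPartial({
  folderId: "my-folder-id",
  networkId: "my-network-id",
  hostGroupIds: ["my-host-group-id"], // place the cluster VMs on dedicated host groups
  deletionProtection: true, // protect the restored cluster from accidental deletion
});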
[]).map((e: any) => + String(e) + ); return message; }, @@ -2425,6 +2471,13 @@ export const RestoreClusterRequest = { } else { obj.securityGroupIds = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -2454,6 +2507,8 @@ export const RestoreClusterRequest = { message.networkId = object.networkId ?? ""; message.folderId = object.folderId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts index d4f88278..2d7c6091 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts @@ -411,6 +411,12 @@ export interface Mysqlconfig57 { * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_show_compatibility_56). */ showCompatibility56?: boolean; + /** + * The number of times that any given stored procedure may be called recursively. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_sp_recursion_depth). + */ + maxSpRecursionDepth?: number; } export enum Mysqlconfig57_SQLMode { @@ -1474,6 +1480,15 @@ export const Mysqlconfig57 = { writer.uint32(546).fork() ).ldelim(); } + if (message.maxSpRecursionDepth !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSpRecursionDepth!, + }, + writer.uint32(554).fork() + ).ldelim(); + } return writer; }, @@ -1869,6 +1884,12 @@ export const Mysqlconfig57 = { reader.uint32() ).value; break; + case 69: + message.maxSpRecursionDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2192,6 +2213,11 @@ export const Mysqlconfig57 = { object.showCompatibility_56 !== null ? Boolean(object.showCompatibility_56) : undefined; + message.maxSpRecursionDepth = + object.maxSpRecursionDepth !== undefined && + object.maxSpRecursionDepth !== null + ? Number(object.maxSpRecursionDepth) + : undefined; return message; }, @@ -2349,6 +2375,8 @@ export const Mysqlconfig57 = { (obj.lowerCaseTableNames = message.lowerCaseTableNames); message.showCompatibility56 !== undefined && (obj.showCompatibility_56 = message.showCompatibility56); + message.maxSpRecursionDepth !== undefined && + (obj.maxSpRecursionDepth = message.maxSpRecursionDepth); return obj; }, @@ -2441,6 +2469,7 @@ export const Mysqlconfig57 = { message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; message.showCompatibility56 = object.showCompatibility56 ?? undefined; + message.maxSpRecursionDepth = object.maxSpRecursionDepth ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts index 20d8be1c..ad126a8e 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts @@ -411,6 +411,12 @@ export interface Mysqlconfig80 { * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_lower_case_table_names) for details. */ lowerCaseTableNames?: number; + /** + * The number of times that any given stored procedure may be called recursively. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_sp_recursion_depth). + */ + maxSpRecursionDepth?: number; } export enum Mysqlconfig80_SQLMode { @@ -1414,6 +1420,15 @@ export const Mysqlconfig80 = { writer.uint32(546).fork() ).ldelim(); } + if (message.maxSpRecursionDepth !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSpRecursionDepth!, + }, + writer.uint32(554).fork() + ).ldelim(); + } return writer; }, @@ -1809,6 +1824,12 @@ export const Mysqlconfig80 = { reader.uint32() ).value; break; + case 69: + message.maxSpRecursionDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2131,6 +2152,11 @@ export const Mysqlconfig80 = { object.lowerCaseTableNames !== null ? Number(object.lowerCaseTableNames) : undefined; + message.maxSpRecursionDepth = + object.maxSpRecursionDepth !== undefined && + object.maxSpRecursionDepth !== null + ? Number(object.maxSpRecursionDepth) + : undefined; return message; }, @@ -2288,6 +2314,8 @@ export const Mysqlconfig80 = { (obj.innodbFtMaxTokenSize = message.innodbFtMaxTokenSize); message.lowerCaseTableNames !== undefined && (obj.lowerCaseTableNames = message.lowerCaseTableNames); + message.maxSpRecursionDepth !== undefined && + (obj.maxSpRecursionDepth = message.maxSpRecursionDepth); return obj; }, @@ -2380,6 +2408,7 @@ export const Mysqlconfig80 = { message.innodbFtMinTokenSize = object.innodbFtMinTokenSize ?? undefined; message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; + message.maxSpRecursionDepth = object.maxSpRecursionDepth ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts index 93be9eff..2c8401b1 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts @@ -77,6 +77,8 @@ export interface Cluster { securityGroupIds: string[]; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. 
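// Illustrative usage sketch (not part of the generated file): both the MySQL 5.7
// and MySQL 8.0 config messages gained `maxSpRecursionDepth`
// (max_sp_recursion_depth). The import path is assumed from the generated layout.
import { Mysqlconfig80 } from "./src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0";

const mysqlConfig = Mysqlconfig80.fromPartial({
  maxSpRecursionDepth: 10, // allow stored procedures to recurse up to 10 levels deep
});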
*/ + hostGroupIds: string[]; } export enum Cluster_Environment { @@ -709,6 +711,7 @@ const baseCluster: object = { status: 0, securityGroupIds: "", deletionProtection: false, + hostGroupIds: "", }; export const Cluster = { @@ -782,6 +785,9 @@ export const Cluster = { if (message.deletionProtection === true) { writer.uint32(128).bool(message.deletionProtection); } + for (const v of message.hostGroupIds) { + writer.uint32(138).string(v!); + } return writer; }, @@ -792,6 +798,7 @@ export const Cluster = { message.labels = {}; message.monitoring = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -854,6 +861,9 @@ export const Cluster = { case 16: message.deletionProtection = reader.bool(); break; + case 17: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -928,6 +938,9 @@ export const Cluster = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -979,6 +992,11 @@ export const Cluster = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -1018,6 +1036,7 @@ export const Cluster = { : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts index 886f947b..cd485249 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts @@ -136,6 +136,8 @@ export interface CreateClusterRequest { securityGroupIds: string[]; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export interface CreateClusterRequest_LabelsEntry { @@ -307,6 +309,10 @@ export interface RestoreClusterRequest { folderId: string; /** User security groups */ securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. 
*/ + hostGroupIds: string[]; } export interface RestoreClusterRequest_LabelsEntry { @@ -1143,6 +1149,7 @@ const baseCreateClusterRequest: object = { networkId: "", securityGroupIds: "", deletionProtection: false, + hostGroupIds: "", }; export const CreateClusterRequest = { @@ -1196,6 +1203,9 @@ export const CreateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(96).bool(message.deletionProtection); } + for (const v of message.hostGroupIds) { + writer.uint32(106).string(v!); + } return writer; }, @@ -1211,6 +1221,7 @@ export const CreateClusterRequest = { message.userSpecs = []; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1258,6 +1269,9 @@ export const CreateClusterRequest = { case 12: message.deletionProtection = reader.bool(); break; + case 13: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -1315,6 +1329,9 @@ export const CreateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -1365,6 +1382,11 @@ export const CreateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -1397,6 +1419,7 @@ export const CreateClusterRequest = { message.networkId = object.networkId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; @@ -2621,6 +2644,8 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + deletionProtection: false, + hostGroupIds: "", }; export const RestoreClusterRequest = { @@ -2677,6 +2702,12 @@ export const RestoreClusterRequest = { for (const v of message.securityGroupIds) { writer.uint32(98).string(v!); } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } + for (const v of message.hostGroupIds) { + writer.uint32(114).string(v!); + } return writer; }, @@ -2690,6 +2721,7 @@ export const RestoreClusterRequest = { message.labels = {}; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2737,6 +2769,12 @@ export const RestoreClusterRequest = { case 12: message.securityGroupIds.push(reader.string()); break; + case 13: + message.deletionProtection = reader.bool(); + break; + case 14: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -2795,6 +2833,14 @@ export const RestoreClusterRequest = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.hostGroupIds = (object.hostGroupIds ?? 
[]).map((e: any) => + String(e) + ); return message; }, @@ -2833,6 +2879,13 @@ export const RestoreClusterRequest = { } else { obj.securityGroupIds = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -2863,6 +2916,8 @@ export const RestoreClusterRequest = { message.networkId = object.networkId ?? ""; message.folderId = object.folderId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts index e0bae4e4..b372b637 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts @@ -58,6 +58,8 @@ export interface Cluster { sqlcollation: string; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; + /** ID of the service account used for access to Yandex Object Storage. */ + serviceAccountId: string; } export enum Cluster_Environment { @@ -506,6 +508,7 @@ const baseCluster: object = { deletionProtection: false, sqlcollation: "", hostGroupIds: "", + serviceAccountId: "", }; export const Cluster = { @@ -573,6 +576,9 @@ export const Cluster = { for (const v of message.hostGroupIds) { writer.uint32(130).string(v!); } + if (message.serviceAccountId !== "") { + writer.uint32(138).string(message.serviceAccountId); + } return writer; }, @@ -640,6 +646,9 @@ export const Cluster = { case 16: message.hostGroupIds.push(reader.string()); break; + case 17: + message.serviceAccountId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -712,6 +721,10 @@ export const Cluster = { message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => String(e) ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; return message; }, @@ -762,6 +775,8 @@ export const Cluster = { } else { obj.hostGroupIds = []; } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); return obj; }, @@ -794,6 +809,7 @@ export const Cluster = { message.deletionProtection = object.deletionProtection ?? false; message.sqlcollation = object.sqlcollation ?? ""; message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts index ff912a4c..9b5decc0 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts @@ -128,6 +128,8 @@ export interface CreateClusterRequest { sqlcollation: string; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; + /** ID of the service account used for access to Yandex Object Storage. 
*/ + serviceAccountId: string; } export interface CreateClusterRequest_LabelsEntry { @@ -171,6 +173,8 @@ export interface UpdateClusterRequest { securityGroupIds: string[]; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** ID of the service account used for access to Yandex Object Storage. */ + serviceAccountId: string; } export interface UpdateClusterRequest_LabelsEntry { @@ -256,6 +260,8 @@ export interface RestoreClusterRequest { deletionProtection: boolean; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; + /** ID of the service account used for access to Yandex Object Storage. */ + serviceAccountId: string; } export interface RestoreClusterRequest_LabelsEntry { @@ -272,6 +278,24 @@ export interface RestoreClusterMetadata { backupId: string; } +export interface StartClusterFailoverRequest { + $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverRequest"; + /** ID of sqlserver cluster. */ + clusterId: string; + /** + * Host name to switch master role to. + * + * To get this name, make a [ClusterService.ListHosts] request. + */ + hostName: string; +} + +export interface StartClusterFailoverMetadata { + $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverMetadata"; + /** ID of the sqlserver cluster being failovered. */ + clusterId: string; +} + export interface LogRecord { $type: "yandex.cloud.mdb.sqlserver.v1.LogRecord"; /** Log record timestamp. */ @@ -865,6 +889,7 @@ const baseCreateClusterRequest: object = { deletionProtection: false, sqlcollation: "", hostGroupIds: "", + serviceAccountId: "", }; export const CreateClusterRequest = { @@ -924,6 +949,9 @@ export const CreateClusterRequest = { for (const v of message.hostGroupIds) { writer.uint32(114).string(v!); } + if (message.serviceAccountId !== "") { + writer.uint32(122).string(message.serviceAccountId); + } return writer; }, @@ -993,6 +1021,9 @@ export const CreateClusterRequest = { case 14: message.hostGroupIds.push(reader.string()); break; + case 15: + message.serviceAccountId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1057,6 +1088,10 @@ export const CreateClusterRequest = { message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => String(e) ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; return message; }, @@ -1114,6 +1149,8 @@ export const CreateClusterRequest = { } else { obj.hostGroupIds = []; } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); return obj; }, @@ -1148,6 +1185,7 @@ export const CreateClusterRequest = { message.deletionProtection = object.deletionProtection ?? false; message.sqlcollation = object.sqlcollation ?? ""; message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? 
""; return message; }, }; @@ -1312,6 +1350,7 @@ const baseUpdateClusterRequest: object = { name: "", securityGroupIds: "", deletionProtection: false, + serviceAccountId: "", }; export const UpdateClusterRequest = { @@ -1353,6 +1392,9 @@ export const UpdateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(64).bool(message.deletionProtection); } + if (message.serviceAccountId !== "") { + writer.uint32(74).string(message.serviceAccountId); + } return writer; }, @@ -1398,6 +1440,9 @@ export const UpdateClusterRequest = { case 8: message.deletionProtection = reader.bool(); break; + case 9: + message.serviceAccountId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1442,6 +1487,10 @@ export const UpdateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; return message; }, @@ -1472,6 +1521,8 @@ export const UpdateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); return obj; }, @@ -1500,6 +1551,7 @@ export const UpdateClusterRequest = { message.name = object.name ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.serviceAccountId = object.serviceAccountId ?? ""; return message; }, }; @@ -1928,6 +1980,7 @@ const baseRestoreClusterRequest: object = { securityGroupIds: "", deletionProtection: false, hostGroupIds: "", + serviceAccountId: "", }; export const RestoreClusterRequest = { @@ -1987,6 +2040,9 @@ export const RestoreClusterRequest = { for (const v of message.hostGroupIds) { writer.uint32(114).string(v!); } + if (message.serviceAccountId !== "") { + writer.uint32(122).string(message.serviceAccountId); + } return writer; }, @@ -2051,6 +2107,9 @@ export const RestoreClusterRequest = { case 14: message.hostGroupIds.push(reader.string()); break; + case 15: + message.serviceAccountId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -2113,6 +2172,10 @@ export const RestoreClusterRequest = { message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => String(e) ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; return message; }, @@ -2156,6 +2219,8 @@ export const RestoreClusterRequest = { } else { obj.hostGroupIds = []; } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); return obj; }, @@ -2187,6 +2252,7 @@ export const RestoreClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? 
""; return message; }, }; @@ -2357,6 +2423,167 @@ export const RestoreClusterMetadata = { messageTypeRegistry.set(RestoreClusterMetadata.$type, RestoreClusterMetadata); +const baseStartClusterFailoverRequest: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverRequest", + clusterId: "", + hostName: "", +}; + +export const StartClusterFailoverRequest = { + $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverRequest" as const, + + encode( + message: StartClusterFailoverRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.hostName !== "") { + writer.uint32(18).string(message.hostName); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartClusterFailoverRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseStartClusterFailoverRequest, + } as StartClusterFailoverRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.hostName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartClusterFailoverRequest { + const message = { + ...baseStartClusterFailoverRequest, + } as StartClusterFailoverRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.hostName = + object.hostName !== undefined && object.hostName !== null + ? String(object.hostName) + : ""; + return message; + }, + + toJSON(message: StartClusterFailoverRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.hostName !== undefined && (obj.hostName = message.hostName); + return obj; + }, + + fromPartial, I>>( + object: I + ): StartClusterFailoverRequest { + const message = { + ...baseStartClusterFailoverRequest, + } as StartClusterFailoverRequest; + message.clusterId = object.clusterId ?? ""; + message.hostName = object.hostName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + StartClusterFailoverRequest.$type, + StartClusterFailoverRequest +); + +const baseStartClusterFailoverMetadata: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverMetadata", + clusterId: "", +}; + +export const StartClusterFailoverMetadata = { + $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverMetadata" as const, + + encode( + message: StartClusterFailoverMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): StartClusterFailoverMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseStartClusterFailoverMetadata, + } as StartClusterFailoverMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StartClusterFailoverMetadata { + const message = { + ...baseStartClusterFailoverMetadata, + } as StartClusterFailoverMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: StartClusterFailoverMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): StartClusterFailoverMetadata { + const message = { + ...baseStartClusterFailoverMetadata, + } as StartClusterFailoverMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + StartClusterFailoverMetadata.$type, + StartClusterFailoverMetadata +); + const baseLogRecord: object = { $type: "yandex.cloud.mdb.sqlserver.v1.LogRecord", }; @@ -4283,6 +4510,19 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Starts a manual failover for a cluster. */ + startFailover: { + path: "/yandex.cloud.mdb.sqlserver.v1.ClusterService/StartFailover", + requestStream: false, + responseStream: false, + requestSerialize: (value: StartClusterFailoverRequest) => + Buffer.from(StartClusterFailoverRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + StartClusterFailoverRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** * Retrieves logs for the specified SQL Server cluster. * @@ -4369,6 +4609,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { backup: handleUnaryCall; /** Creates a new SQL Server cluster using the specified backup. */ restore: handleUnaryCall; + /** Starts a manual failover for a cluster. */ + startFailover: handleUnaryCall; /** * Retrieves logs for the specified SQL Server cluster. * @@ -4563,6 +4805,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Starts a manual failover for a cluster. */ + startFailover( + request: StartClusterFailoverRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + startFailover( + request: StartClusterFailoverRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + startFailover( + request: StartClusterFailoverRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** * Retrieves logs for the specified SQL Server cluster. 
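// Illustrative usage sketch (not part of the generated file): the SQL Server
// ClusterService gained StartFailover. This assumes `client` is a
// ClusterServiceClient instance obtained elsewhere (client construction is not
// shown in this hunk); the cluster ID and host name are hypothetical placeholders.
import {
  ClusterServiceClient,
  StartClusterFailoverRequest,
} from "./src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service";

declare const client: ClusterServiceClient;

client.startFailover(
  StartClusterFailoverRequest.fromPartial({
    clusterId: "my-sqlserver-cluster-id",
    hostName: "rc1a-example.mdb.yandexcloud.net", // host name as returned by ListHosts
  }),
  (err, operation) => {
    if (err) throw err;
    console.log("failover operation:", operation.id);
  }
);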
* diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts index 44627369..e652a3d8 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts @@ -93,18 +93,6 @@ export interface CreateDatabaseMetadata { databaseName: string; } -export interface RestoreDatabaseMetadata { - $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata"; - /** ID of the SQLServer cluster where a database is being created. */ - clusterId: string; - /** Name of the SQLServer database that is being created. */ - databaseName: string; - /** name of the database which backup will be used to restore the database */ - fromDatabase: string; - /** ID of a backup to be used */ - backupId: string; -} - export interface DeleteDatabaseRequest { $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseRequest"; /** @@ -146,6 +134,76 @@ export interface RestoreDatabaseRequest { time?: Date; } +export interface RestoreDatabaseMetadata { + $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata"; + /** ID of the SQLServer cluster where a database is being created. */ + clusterId: string; + /** Name of the SQLServer database that is being created. */ + databaseName: string; + /** name of the database which backup will be used to restore the database */ + fromDatabase: string; + /** ID of a backup to be used */ + backupId: string; +} + +export interface ImportDatabaseBackupRequest { + $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupRequest"; + /** + * Required. ID of the SQL Server cluster to import a database in. + * To get the cluster ID, use a [ClusterService.List] request + */ + clusterId: string; + /** Name of the SQLServer database that is being imported. */ + databaseName: string; + /** Name of object storage bucket to import backups from. */ + s3Bucket: string; + /** Path in object storage bucket to import backups from. */ + s3Path: string; + /** List of .bak files in bucket containing database backup */ + files: string[]; +} + +export interface ImportDatabaseBackupMetadata { + $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupMetadata"; + /** ID of the SQLServer cluster where a database is being imported. */ + clusterId: string; + /** Name of the SQLServer database that is being imported. */ + databaseName: string; + /** Name of object storage bucket to import backups from. */ + s3Bucket: string; + /** Path in object storage bucket to import backups from. */ + s3Path: string; +} + +export interface ExportDatabaseBackupRequest { + $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupRequest"; + /** + * Required. ID of the SQL Server cluster to export a database from. + * To get the cluster ID, use a [ClusterService.List] request + */ + clusterId: string; + /** Name of the SQLServer database that is being exported. */ + databaseName: string; + /** Name of object storage bucket to export backups to */ + s3Bucket: string; + /** Path in object storage bucket to export backups to. */ + s3Path: string; + /** Prefix for .bak files to */ + prefix: string; +} + +export interface ExportDatabaseBackupMetadata { + $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupMetadata"; + /** ID of the SQLServer cluster where a database is being exported. */ + clusterId: string; + /** Name of the SQLServer database that is being exported. */ + databaseName: string; + /** Name of object storage bucket to import backups from. 
*/ + s3Bucket: string; + /** Path in object storage bucket to import backups from. */ + s3Path: string; +} + const baseGetDatabaseRequest: object = { $type: "yandex.cloud.mdb.sqlserver.v1.GetDatabaseRequest", clusterId: "", @@ -564,19 +622,17 @@ export const CreateDatabaseMetadata = { messageTypeRegistry.set(CreateDatabaseMetadata.$type, CreateDatabaseMetadata); -const baseRestoreDatabaseMetadata: object = { - $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata", +const baseDeleteDatabaseRequest: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseRequest", clusterId: "", databaseName: "", - fromDatabase: "", - backupId: "", }; -export const RestoreDatabaseMetadata = { - $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata" as const, +export const DeleteDatabaseRequest = { + $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseRequest" as const, encode( - message: RestoreDatabaseMetadata, + message: DeleteDatabaseRequest, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.clusterId !== "") { @@ -585,24 +641,16 @@ export const RestoreDatabaseMetadata = { if (message.databaseName !== "") { writer.uint32(18).string(message.databaseName); } - if (message.fromDatabase !== "") { - writer.uint32(26).string(message.fromDatabase); - } - if (message.backupId !== "") { - writer.uint32(34).string(message.backupId); - } return writer; }, decode( input: _m0.Reader | Uint8Array, length?: number - ): RestoreDatabaseMetadata { + ): DeleteDatabaseRequest { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { - ...baseRestoreDatabaseMetadata, - } as RestoreDatabaseMetadata; + const message = { ...baseDeleteDatabaseRequest } as DeleteDatabaseRequest; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -612,12 +660,6 @@ export const RestoreDatabaseMetadata = { case 2: message.databaseName = reader.string(); break; - case 3: - message.fromDatabase = reader.string(); - break; - case 4: - message.backupId = reader.string(); - break; default: reader.skipType(tag & 7); break; @@ -626,10 +668,8 @@ export const RestoreDatabaseMetadata = { return message; }, - fromJSON(object: any): RestoreDatabaseMetadata { - const message = { - ...baseRestoreDatabaseMetadata, - } as RestoreDatabaseMetadata; + fromJSON(object: any): DeleteDatabaseRequest { + const message = { ...baseDeleteDatabaseRequest } as DeleteDatabaseRequest; message.clusterId = object.clusterId !== undefined && object.clusterId !== null ? String(object.clusterId) @@ -638,55 +678,40 @@ export const RestoreDatabaseMetadata = { object.databaseName !== undefined && object.databaseName !== null ? String(object.databaseName) : ""; - message.fromDatabase = - object.fromDatabase !== undefined && object.fromDatabase !== null - ? String(object.fromDatabase) - : ""; - message.backupId = - object.backupId !== undefined && object.backupId !== null - ? 
String(object.backupId) - : ""; return message; }, - toJSON(message: RestoreDatabaseMetadata): unknown { + toJSON(message: DeleteDatabaseRequest): unknown { const obj: any = {}; message.clusterId !== undefined && (obj.clusterId = message.clusterId); message.databaseName !== undefined && (obj.databaseName = message.databaseName); - message.fromDatabase !== undefined && - (obj.fromDatabase = message.fromDatabase); - message.backupId !== undefined && (obj.backupId = message.backupId); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): RestoreDatabaseMetadata { - const message = { - ...baseRestoreDatabaseMetadata, - } as RestoreDatabaseMetadata; + ): DeleteDatabaseRequest { + const message = { ...baseDeleteDatabaseRequest } as DeleteDatabaseRequest; message.clusterId = object.clusterId ?? ""; message.databaseName = object.databaseName ?? ""; - message.fromDatabase = object.fromDatabase ?? ""; - message.backupId = object.backupId ?? ""; return message; }, }; -messageTypeRegistry.set(RestoreDatabaseMetadata.$type, RestoreDatabaseMetadata); +messageTypeRegistry.set(DeleteDatabaseRequest.$type, DeleteDatabaseRequest); -const baseDeleteDatabaseRequest: object = { - $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseRequest", +const baseDeleteDatabaseMetadata: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseMetadata", clusterId: "", databaseName: "", }; -export const DeleteDatabaseRequest = { - $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseRequest" as const, +export const DeleteDatabaseMetadata = { + $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseMetadata" as const, encode( - message: DeleteDatabaseRequest, + message: DeleteDatabaseMetadata, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.clusterId !== "") { @@ -701,10 +726,10 @@ export const DeleteDatabaseRequest = { decode( input: _m0.Reader | Uint8Array, length?: number - ): DeleteDatabaseRequest { + ): DeleteDatabaseMetadata { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseDeleteDatabaseRequest } as DeleteDatabaseRequest; + const message = { ...baseDeleteDatabaseMetadata } as DeleteDatabaseMetadata; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -722,8 +747,8 @@ export const DeleteDatabaseRequest = { return message; }, - fromJSON(object: any): DeleteDatabaseRequest { - const message = { ...baseDeleteDatabaseRequest } as DeleteDatabaseRequest; + fromJSON(object: any): DeleteDatabaseMetadata { + const message = { ...baseDeleteDatabaseMetadata } as DeleteDatabaseMetadata; message.clusterId = object.clusterId !== undefined && object.clusterId !== null ? String(object.clusterId) @@ -735,7 +760,7 @@ export const DeleteDatabaseRequest = { return message; }, - toJSON(message: DeleteDatabaseRequest): unknown { + toJSON(message: DeleteDatabaseMetadata): unknown { const obj: any = {}; message.clusterId !== undefined && (obj.clusterId = message.clusterId); message.databaseName !== undefined && @@ -743,29 +768,31 @@ export const DeleteDatabaseRequest = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): DeleteDatabaseRequest { - const message = { ...baseDeleteDatabaseRequest } as DeleteDatabaseRequest; + ): DeleteDatabaseMetadata { + const message = { ...baseDeleteDatabaseMetadata } as DeleteDatabaseMetadata; message.clusterId = object.clusterId ?? ""; message.databaseName = object.databaseName ?? 
""; return message; }, }; -messageTypeRegistry.set(DeleteDatabaseRequest.$type, DeleteDatabaseRequest); +messageTypeRegistry.set(DeleteDatabaseMetadata.$type, DeleteDatabaseMetadata); -const baseDeleteDatabaseMetadata: object = { - $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseMetadata", +const baseRestoreDatabaseRequest: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseRequest", clusterId: "", databaseName: "", + fromDatabase: "", + backupId: "", }; -export const DeleteDatabaseMetadata = { - $type: "yandex.cloud.mdb.sqlserver.v1.DeleteDatabaseMetadata" as const, +export const RestoreDatabaseRequest = { + $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseRequest" as const, encode( - message: DeleteDatabaseMetadata, + message: RestoreDatabaseRequest, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.clusterId !== "") { @@ -774,16 +801,28 @@ export const DeleteDatabaseMetadata = { if (message.databaseName !== "") { writer.uint32(18).string(message.databaseName); } + if (message.fromDatabase !== "") { + writer.uint32(26).string(message.fromDatabase); + } + if (message.backupId !== "") { + writer.uint32(34).string(message.backupId); + } + if (message.time !== undefined) { + Timestamp.encode( + toTimestamp(message.time), + writer.uint32(50).fork() + ).ldelim(); + } return writer; }, decode( input: _m0.Reader | Uint8Array, length?: number - ): DeleteDatabaseMetadata { + ): RestoreDatabaseRequest { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseDeleteDatabaseMetadata } as DeleteDatabaseMetadata; + const message = { ...baseRestoreDatabaseRequest } as RestoreDatabaseRequest; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -793,6 +832,17 @@ export const DeleteDatabaseMetadata = { case 2: message.databaseName = reader.string(); break; + case 3: + message.fromDatabase = reader.string(); + break; + case 4: + message.backupId = reader.string(); + break; + case 6: + message.time = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -801,8 +851,8 @@ export const DeleteDatabaseMetadata = { return message; }, - fromJSON(object: any): DeleteDatabaseMetadata { - const message = { ...baseDeleteDatabaseMetadata } as DeleteDatabaseMetadata; + fromJSON(object: any): RestoreDatabaseRequest { + const message = { ...baseRestoreDatabaseRequest } as RestoreDatabaseRequest; message.clusterId = object.clusterId !== undefined && object.clusterId !== null ? String(object.clusterId) @@ -811,42 +861,61 @@ export const DeleteDatabaseMetadata = { object.databaseName !== undefined && object.databaseName !== null ? String(object.databaseName) : ""; + message.fromDatabase = + object.fromDatabase !== undefined && object.fromDatabase !== null + ? String(object.fromDatabase) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.time = + object.time !== undefined && object.time !== null + ? 
fromJsonTimestamp(object.time) + : undefined; return message; }, - toJSON(message: DeleteDatabaseMetadata): unknown { + toJSON(message: RestoreDatabaseRequest): unknown { const obj: any = {}; message.clusterId !== undefined && (obj.clusterId = message.clusterId); message.databaseName !== undefined && (obj.databaseName = message.databaseName); + message.fromDatabase !== undefined && + (obj.fromDatabase = message.fromDatabase); + message.backupId !== undefined && (obj.backupId = message.backupId); + message.time !== undefined && (obj.time = message.time.toISOString()); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): DeleteDatabaseMetadata { - const message = { ...baseDeleteDatabaseMetadata } as DeleteDatabaseMetadata; + ): RestoreDatabaseRequest { + const message = { ...baseRestoreDatabaseRequest } as RestoreDatabaseRequest; message.clusterId = object.clusterId ?? ""; message.databaseName = object.databaseName ?? ""; + message.fromDatabase = object.fromDatabase ?? ""; + message.backupId = object.backupId ?? ""; + message.time = object.time ?? undefined; return message; }, }; -messageTypeRegistry.set(DeleteDatabaseMetadata.$type, DeleteDatabaseMetadata); +messageTypeRegistry.set(RestoreDatabaseRequest.$type, RestoreDatabaseRequest); -const baseRestoreDatabaseRequest: object = { - $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseRequest", +const baseRestoreDatabaseMetadata: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata", clusterId: "", databaseName: "", fromDatabase: "", backupId: "", }; -export const RestoreDatabaseRequest = { - $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseRequest" as const, +export const RestoreDatabaseMetadata = { + $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata" as const, encode( - message: RestoreDatabaseRequest, + message: RestoreDatabaseMetadata, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.clusterId !== "") { @@ -861,22 +930,18 @@ export const RestoreDatabaseRequest = { if (message.backupId !== "") { writer.uint32(34).string(message.backupId); } - if (message.time !== undefined) { - Timestamp.encode( - toTimestamp(message.time), - writer.uint32(50).fork() - ).ldelim(); - } return writer; }, decode( input: _m0.Reader | Uint8Array, length?: number - ): RestoreDatabaseRequest { + ): RestoreDatabaseMetadata { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseRestoreDatabaseRequest } as RestoreDatabaseRequest; + const message = { + ...baseRestoreDatabaseMetadata, + } as RestoreDatabaseMetadata; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -892,11 +957,6 @@ export const RestoreDatabaseRequest = { case 4: message.backupId = reader.string(); break; - case 6: - message.time = fromTimestamp( - Timestamp.decode(reader, reader.uint32()) - ); - break; default: reader.skipType(tag & 7); break; @@ -905,8 +965,10 @@ export const RestoreDatabaseRequest = { return message; }, - fromJSON(object: any): RestoreDatabaseRequest { - const message = { ...baseRestoreDatabaseRequest } as RestoreDatabaseRequest; + fromJSON(object: any): RestoreDatabaseMetadata { + const message = { + ...baseRestoreDatabaseMetadata, + } as RestoreDatabaseMetadata; message.clusterId = object.clusterId !== undefined && object.clusterId !== null ? 
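// Editor's note: illustrative sketch only, not generator output.
// RestoreDatabaseRequest (rebuilt above) carries an optional `time` value next to
// fromDatabase/backupId: fromPartial accepts a JavaScript Date, encode() writes
// it as a protobuf Timestamp in field 6, and toJSON() renders it as an ISO-8601
// string. All identifiers below are placeholders.
const restoreRequest = RestoreDatabaseRequest.fromPartial({
  clusterId: "<cluster-id>",
  databaseName: "<target-database-name>",
  fromDatabase: "<source-database-name>",
  backupId: "<backup-id>",
  time: new Date("2022-02-16T00:00:00Z"), // example timestamp (placeholder)
});
console.log(RestoreDatabaseRequest.toJSON(restoreRequest)); // `time` appears as an ISO string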
String(object.clusterId) @@ -923,14 +985,10 @@ export const RestoreDatabaseRequest = { object.backupId !== undefined && object.backupId !== null ? String(object.backupId) : ""; - message.time = - object.time !== undefined && object.time !== null - ? fromJsonTimestamp(object.time) - : undefined; return message; }, - toJSON(message: RestoreDatabaseRequest): unknown { + toJSON(message: RestoreDatabaseMetadata): unknown { const obj: any = {}; message.clusterId !== undefined && (obj.clusterId = message.clusterId); message.databaseName !== undefined && @@ -938,24 +996,508 @@ export const RestoreDatabaseRequest = { message.fromDatabase !== undefined && (obj.fromDatabase = message.fromDatabase); message.backupId !== undefined && (obj.backupId = message.backupId); - message.time !== undefined && (obj.time = message.time.toISOString()); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): RestoreDatabaseRequest { - const message = { ...baseRestoreDatabaseRequest } as RestoreDatabaseRequest; + ): RestoreDatabaseMetadata { + const message = { + ...baseRestoreDatabaseMetadata, + } as RestoreDatabaseMetadata; message.clusterId = object.clusterId ?? ""; message.databaseName = object.databaseName ?? ""; message.fromDatabase = object.fromDatabase ?? ""; message.backupId = object.backupId ?? ""; - message.time = object.time ?? undefined; return message; }, }; -messageTypeRegistry.set(RestoreDatabaseRequest.$type, RestoreDatabaseRequest); +messageTypeRegistry.set(RestoreDatabaseMetadata.$type, RestoreDatabaseMetadata); + +const baseImportDatabaseBackupRequest: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupRequest", + clusterId: "", + databaseName: "", + s3Bucket: "", + s3Path: "", + files: "", +}; + +export const ImportDatabaseBackupRequest = { + $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupRequest" as const, + + encode( + message: ImportDatabaseBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.databaseName !== "") { + writer.uint32(18).string(message.databaseName); + } + if (message.s3Bucket !== "") { + writer.uint32(26).string(message.s3Bucket); + } + if (message.s3Path !== "") { + writer.uint32(34).string(message.s3Path); + } + for (const v of message.files) { + writer.uint32(42).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ImportDatabaseBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseImportDatabaseBackupRequest, + } as ImportDatabaseBackupRequest; + message.files = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.databaseName = reader.string(); + break; + case 3: + message.s3Bucket = reader.string(); + break; + case 4: + message.s3Path = reader.string(); + break; + case 5: + message.files.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ImportDatabaseBackupRequest { + const message = { + ...baseImportDatabaseBackupRequest, + } as ImportDatabaseBackupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + message.databaseName = + object.databaseName !== undefined && object.databaseName !== null + ? String(object.databaseName) + : ""; + message.s3Bucket = + object.s3Bucket !== undefined && object.s3Bucket !== null + ? String(object.s3Bucket) + : ""; + message.s3Path = + object.s3Path !== undefined && object.s3Path !== null + ? String(object.s3Path) + : ""; + message.files = (object.files ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: ImportDatabaseBackupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.databaseName !== undefined && + (obj.databaseName = message.databaseName); + message.s3Bucket !== undefined && (obj.s3Bucket = message.s3Bucket); + message.s3Path !== undefined && (obj.s3Path = message.s3Path); + if (message.files) { + obj.files = message.files.map((e) => e); + } else { + obj.files = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ImportDatabaseBackupRequest { + const message = { + ...baseImportDatabaseBackupRequest, + } as ImportDatabaseBackupRequest; + message.clusterId = object.clusterId ?? ""; + message.databaseName = object.databaseName ?? ""; + message.s3Bucket = object.s3Bucket ?? ""; + message.s3Path = object.s3Path ?? ""; + message.files = object.files?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ImportDatabaseBackupRequest.$type, + ImportDatabaseBackupRequest +); + +const baseImportDatabaseBackupMetadata: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupMetadata", + clusterId: "", + databaseName: "", + s3Bucket: "", + s3Path: "", +}; + +export const ImportDatabaseBackupMetadata = { + $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupMetadata" as const, + + encode( + message: ImportDatabaseBackupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.databaseName !== "") { + writer.uint32(18).string(message.databaseName); + } + if (message.s3Bucket !== "") { + writer.uint32(26).string(message.s3Bucket); + } + if (message.s3Path !== "") { + writer.uint32(34).string(message.s3Path); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ImportDatabaseBackupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseImportDatabaseBackupMetadata, + } as ImportDatabaseBackupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.databaseName = reader.string(); + break; + case 3: + message.s3Bucket = reader.string(); + break; + case 4: + message.s3Path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ImportDatabaseBackupMetadata { + const message = { + ...baseImportDatabaseBackupMetadata, + } as ImportDatabaseBackupMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.databaseName = + object.databaseName !== undefined && object.databaseName !== null + ? String(object.databaseName) + : ""; + message.s3Bucket = + object.s3Bucket !== undefined && object.s3Bucket !== null + ? 
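// Editor's note: illustrative sketch only, not generator output.
// ImportDatabaseBackupRequest (above) describes importing a database from .bak
// files kept in an Object Storage bucket. `files` is a repeated string field:
// the encoder writes one wire element per entry (field 5) and fromPartial/toJSON
// round-trip it as a plain string array. All values below are placeholders.
const importRequest = ImportDatabaseBackupRequest.fromPartial({
  clusterId: "<cluster-id>",
  databaseName: "<database-name>",
  s3Bucket: "<bucket-name>",
  s3Path: "<path/in/bucket/>",
  files: ["<backup-part-1.bak>", "<backup-part-2.bak>"],
});
console.log(ImportDatabaseBackupRequest.toJSON(importRequest)); // files round-trips as a string array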
String(object.s3Bucket) + : ""; + message.s3Path = + object.s3Path !== undefined && object.s3Path !== null + ? String(object.s3Path) + : ""; + return message; + }, + + toJSON(message: ImportDatabaseBackupMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.databaseName !== undefined && + (obj.databaseName = message.databaseName); + message.s3Bucket !== undefined && (obj.s3Bucket = message.s3Bucket); + message.s3Path !== undefined && (obj.s3Path = message.s3Path); + return obj; + }, + + fromPartial, I>>( + object: I + ): ImportDatabaseBackupMetadata { + const message = { + ...baseImportDatabaseBackupMetadata, + } as ImportDatabaseBackupMetadata; + message.clusterId = object.clusterId ?? ""; + message.databaseName = object.databaseName ?? ""; + message.s3Bucket = object.s3Bucket ?? ""; + message.s3Path = object.s3Path ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ImportDatabaseBackupMetadata.$type, + ImportDatabaseBackupMetadata +); + +const baseExportDatabaseBackupRequest: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupRequest", + clusterId: "", + databaseName: "", + s3Bucket: "", + s3Path: "", + prefix: "", +}; + +export const ExportDatabaseBackupRequest = { + $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupRequest" as const, + + encode( + message: ExportDatabaseBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.databaseName !== "") { + writer.uint32(18).string(message.databaseName); + } + if (message.s3Bucket !== "") { + writer.uint32(26).string(message.s3Bucket); + } + if (message.s3Path !== "") { + writer.uint32(34).string(message.s3Path); + } + if (message.prefix !== "") { + writer.uint32(42).string(message.prefix); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExportDatabaseBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseExportDatabaseBackupRequest, + } as ExportDatabaseBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.databaseName = reader.string(); + break; + case 3: + message.s3Bucket = reader.string(); + break; + case 4: + message.s3Path = reader.string(); + break; + case 5: + message.prefix = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExportDatabaseBackupRequest { + const message = { + ...baseExportDatabaseBackupRequest, + } as ExportDatabaseBackupRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.databaseName = + object.databaseName !== undefined && object.databaseName !== null + ? String(object.databaseName) + : ""; + message.s3Bucket = + object.s3Bucket !== undefined && object.s3Bucket !== null + ? String(object.s3Bucket) + : ""; + message.s3Path = + object.s3Path !== undefined && object.s3Path !== null + ? String(object.s3Path) + : ""; + message.prefix = + object.prefix !== undefined && object.prefix !== null + ? 
String(object.prefix) + : ""; + return message; + }, + + toJSON(message: ExportDatabaseBackupRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.databaseName !== undefined && + (obj.databaseName = message.databaseName); + message.s3Bucket !== undefined && (obj.s3Bucket = message.s3Bucket); + message.s3Path !== undefined && (obj.s3Path = message.s3Path); + message.prefix !== undefined && (obj.prefix = message.prefix); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExportDatabaseBackupRequest { + const message = { + ...baseExportDatabaseBackupRequest, + } as ExportDatabaseBackupRequest; + message.clusterId = object.clusterId ?? ""; + message.databaseName = object.databaseName ?? ""; + message.s3Bucket = object.s3Bucket ?? ""; + message.s3Path = object.s3Path ?? ""; + message.prefix = object.prefix ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ExportDatabaseBackupRequest.$type, + ExportDatabaseBackupRequest +); + +const baseExportDatabaseBackupMetadata: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupMetadata", + clusterId: "", + databaseName: "", + s3Bucket: "", + s3Path: "", +}; + +export const ExportDatabaseBackupMetadata = { + $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupMetadata" as const, + + encode( + message: ExportDatabaseBackupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.databaseName !== "") { + writer.uint32(18).string(message.databaseName); + } + if (message.s3Bucket !== "") { + writer.uint32(26).string(message.s3Bucket); + } + if (message.s3Path !== "") { + writer.uint32(34).string(message.s3Path); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExportDatabaseBackupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseExportDatabaseBackupMetadata, + } as ExportDatabaseBackupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.databaseName = reader.string(); + break; + case 3: + message.s3Bucket = reader.string(); + break; + case 4: + message.s3Path = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExportDatabaseBackupMetadata { + const message = { + ...baseExportDatabaseBackupMetadata, + } as ExportDatabaseBackupMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.databaseName = + object.databaseName !== undefined && object.databaseName !== null + ? String(object.databaseName) + : ""; + message.s3Bucket = + object.s3Bucket !== undefined && object.s3Bucket !== null + ? String(object.s3Bucket) + : ""; + message.s3Path = + object.s3Path !== undefined && object.s3Path !== null + ? 
String(object.s3Path) + : ""; + return message; + }, + + toJSON(message: ExportDatabaseBackupMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.databaseName !== undefined && + (obj.databaseName = message.databaseName); + message.s3Bucket !== undefined && (obj.s3Bucket = message.s3Bucket); + message.s3Path !== undefined && (obj.s3Path = message.s3Path); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExportDatabaseBackupMetadata { + const message = { + ...baseExportDatabaseBackupMetadata, + } as ExportDatabaseBackupMetadata; + message.clusterId = object.clusterId ?? ""; + message.databaseName = object.databaseName ?? ""; + message.s3Bucket = object.s3Bucket ?? ""; + message.s3Path = object.s3Path ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ExportDatabaseBackupMetadata.$type, + ExportDatabaseBackupMetadata +); /** A set of methods for managing SQL Server databases. */ export const DatabaseServiceService = { @@ -1011,6 +1553,32 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Imports a new SQL Server database from external backup */ + importBackup: { + path: "/yandex.cloud.mdb.sqlserver.v1.DatabaseService/ImportBackup", + requestStream: false, + responseStream: false, + requestSerialize: (value: ImportDatabaseBackupRequest) => + Buffer.from(ImportDatabaseBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ImportDatabaseBackupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Exports database backup to external backup */ + exportBackup: { + path: "/yandex.cloud.mdb.sqlserver.v1.DatabaseService/ExportBackup", + requestStream: false, + responseStream: false, + requestSerialize: (value: ExportDatabaseBackupRequest) => + Buffer.from(ExportDatabaseBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ExportDatabaseBackupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified SQL Server database. */ delete: { path: "/yandex.cloud.mdb.sqlserver.v1.DatabaseService/Delete", @@ -1038,6 +1606,10 @@ export interface DatabaseServiceServer extends UntypedServiceImplementation { create: handleUnaryCall; /** Creates a new SQL Server database in the specified cluster from a backup */ restore: handleUnaryCall; + /** Imports a new SQL Server database from external backup */ + importBackup: handleUnaryCall; + /** Exports database backup to external backup */ + exportBackup: handleUnaryCall; /** Deletes the specified SQL Server database. 
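// Editor's note: illustrative sketch only, not generator output. ImportBackup and
// ExportBackup are registered above as ordinary unary RPCs, and the
// DatabaseServiceClient interface further down exposes them with the usual
// overloads, including the (request, metadata, options, callback) form. The
// client is assumed to be already constructed (for example via the SDK's Session,
// as in the package examples); Metadata is the @grpc/grpc-js class this file
// already imports, and every identifier below is a placeholder.
declare const databaseClient: DatabaseServiceClient;

const callMetadata = new Metadata(); // attach per-call headers here if needed
databaseClient.exportBackup(
  ExportDatabaseBackupRequest.fromPartial({
    clusterId: "<cluster-id>",
    databaseName: "<database-name>",
    s3Bucket: "<bucket-name>",
    s3Path: "<path/in/bucket/>",
    prefix: "<backup-file-prefix>",
  }),
  callMetadata,
  { deadline: Date.now() + 60_000 }, // grpc-js call option: give up after one minute
  (error, operation) => {
    if (error) throw error;
    console.log(operation.id); // Operation tracking the export
  }
);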
*/ delete: handleUnaryCall; } @@ -1120,6 +1692,38 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Imports a new SQL Server database from external backup */ + importBackup( + request: ImportDatabaseBackupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + importBackup( + request: ImportDatabaseBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + importBackup( + request: ImportDatabaseBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Exports database backup to external backup */ + exportBackup( + request: ExportDatabaseBackupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + exportBackup( + request: ExportDatabaseBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + exportBackup( + request: ExportDatabaseBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Deletes the specified SQL Server database. */ delete( request: DeleteDatabaseRequest, diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts index d4c7972c..f25a4fe1 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts @@ -156,6 +156,18 @@ export interface DeployContainerRevisionMetadata { containerRevisionId: string; } +export interface RollbackContainerRequest { + $type: "yandex.cloud.serverless.containers.v1.RollbackContainerRequest"; + containerId: string; + revisionId: string; +} + +export interface RollbackContainerMetadata { + $type: "yandex.cloud.serverless.containers.v1.RollbackContainerMetadata"; + containerId: string; + revisionId: string; +} + export interface ListContainerOperationsRequest { $type: "yandex.cloud.serverless.containers.v1.ListContainerOperationsRequest"; containerId: string; @@ -1999,6 +2011,184 @@ messageTypeRegistry.set( DeployContainerRevisionMetadata ); +const baseRollbackContainerRequest: object = { + $type: "yandex.cloud.serverless.containers.v1.RollbackContainerRequest", + containerId: "", + revisionId: "", +}; + +export const RollbackContainerRequest = { + $type: + "yandex.cloud.serverless.containers.v1.RollbackContainerRequest" as const, + + encode( + message: RollbackContainerRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.containerId !== "") { + writer.uint32(10).string(message.containerId); + } + if (message.revisionId !== "") { + writer.uint32(18).string(message.revisionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RollbackContainerRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRollbackContainerRequest, + } as RollbackContainerRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.containerId = reader.string(); + break; + case 2: + message.revisionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RollbackContainerRequest { + const message = { + ...baseRollbackContainerRequest, + } as RollbackContainerRequest; + message.containerId = + object.containerId !== undefined && object.containerId !== null + ? String(object.containerId) + : ""; + message.revisionId = + object.revisionId !== undefined && object.revisionId !== null + ? String(object.revisionId) + : ""; + return message; + }, + + toJSON(message: RollbackContainerRequest): unknown { + const obj: any = {}; + message.containerId !== undefined && + (obj.containerId = message.containerId); + message.revisionId !== undefined && (obj.revisionId = message.revisionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RollbackContainerRequest { + const message = { + ...baseRollbackContainerRequest, + } as RollbackContainerRequest; + message.containerId = object.containerId ?? ""; + message.revisionId = object.revisionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + RollbackContainerRequest.$type, + RollbackContainerRequest +); + +const baseRollbackContainerMetadata: object = { + $type: "yandex.cloud.serverless.containers.v1.RollbackContainerMetadata", + containerId: "", + revisionId: "", +}; + +export const RollbackContainerMetadata = { + $type: + "yandex.cloud.serverless.containers.v1.RollbackContainerMetadata" as const, + + encode( + message: RollbackContainerMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.containerId !== "") { + writer.uint32(10).string(message.containerId); + } + if (message.revisionId !== "") { + writer.uint32(18).string(message.revisionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RollbackContainerMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRollbackContainerMetadata, + } as RollbackContainerMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.containerId = reader.string(); + break; + case 2: + message.revisionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RollbackContainerMetadata { + const message = { + ...baseRollbackContainerMetadata, + } as RollbackContainerMetadata; + message.containerId = + object.containerId !== undefined && object.containerId !== null + ? String(object.containerId) + : ""; + message.revisionId = + object.revisionId !== undefined && object.revisionId !== null + ? 
String(object.revisionId) + : ""; + return message; + }, + + toJSON(message: RollbackContainerMetadata): unknown { + const obj: any = {}; + message.containerId !== undefined && + (obj.containerId = message.containerId); + message.revisionId !== undefined && (obj.revisionId = message.revisionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RollbackContainerMetadata { + const message = { + ...baseRollbackContainerMetadata, + } as RollbackContainerMetadata; + message.containerId = object.containerId ?? ""; + message.revisionId = object.revisionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + RollbackContainerMetadata.$type, + RollbackContainerMetadata +); + const baseListContainerOperationsRequest: object = { $type: "yandex.cloud.serverless.containers.v1.ListContainerOperationsRequest", containerId: "", @@ -2280,6 +2470,18 @@ export const ContainerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + rollback: { + path: "/yandex.cloud.serverless.containers.v1.ContainerService/Rollback", + requestStream: false, + responseStream: false, + requestSerialize: (value: RollbackContainerRequest) => + Buffer.from(RollbackContainerRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + RollbackContainerRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, getRevision: { path: "/yandex.cloud.serverless.containers.v1.ContainerService/GetRevision", requestStream: false, @@ -2364,6 +2566,7 @@ export interface ContainerServiceServer extends UntypedServiceImplementation { update: handleUnaryCall; delete: handleUnaryCall; deployRevision: handleUnaryCall; + rollback: handleUnaryCall; getRevision: handleUnaryCall; listRevisions: handleUnaryCall< ListContainersRevisionsRequest, @@ -2481,6 +2684,21 @@ export interface ContainerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + rollback( + request: RollbackContainerRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rollback( + request: RollbackContainerRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + rollback( + request: RollbackContainerRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; getRevision( request: GetContainerRevisionRequest, callback: (error: ServiceError | null, response: Revision) => void diff --git a/src/generated/yandex/cloud/service_clients.ts b/src/generated/yandex/cloud/service_clients.ts index 255d6f2e..6f89b569 100644 --- a/src/generated/yandex/cloud/service_clients.ts +++ b/src/generated/yandex/cloud/service_clients.ts @@ -136,7 +136,9 @@ export const ContainerServiceClient = cloudApi.serverless.containers_container_s export const FunctionServiceClient = cloudApi.serverless.functions_function_service.FunctionServiceClient; export const ProxyServiceClient = cloudApi.serverless.mdbproxy_proxy_service.ProxyServiceClient; export const TriggerServiceClient = cloudApi.serverless.triggers_trigger_service.TriggerServiceClient; +export const BucketServiceClient = cloudApi.storage.bucket_service.BucketServiceClient; export const AddressServiceClient = 
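// Editor's note: illustrative sketch only, not generator output. The serverless
// ContainerService gains a Rollback call: RollbackContainerRequest names a
// container and the revision to roll back to, and the ContainerServiceClient
// interface above exposes rollback with the usual unary overloads returning a
// long-running Operation. The client instance and both IDs are placeholders.
declare const containerClient: ContainerServiceClient;

containerClient.rollback(
  RollbackContainerRequest.fromPartial({
    containerId: "<container-id>", // placeholder
    revisionId: "<revision-id>",   // placeholder; revision the container is rolled back to
  }),
  (error, operation) => {
    if (error) throw error;
    console.log(operation.id);
  }
);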
cloudApi.vpc.address_service.AddressServiceClient; +export const GatewayServiceClient = cloudApi.vpc.gateway_service.GatewayServiceClient; export const NetworkServiceClient = cloudApi.vpc.network_service.NetworkServiceClient; export const RouteTableServiceClient = cloudApi.vpc.route_table_service.RouteTableServiceClient; export const SecurityGroupServiceClient = cloudApi.vpc.security_group_service.SecurityGroupServiceClient; diff --git a/src/generated/yandex/cloud/storage/index.ts b/src/generated/yandex/cloud/storage/index.ts new file mode 100644 index 00000000..85ff5061 --- /dev/null +++ b/src/generated/yandex/cloud/storage/index.ts @@ -0,0 +1,2 @@ +export * as bucket from './v1/bucket' +export * as bucket_service from './v1/bucket_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/storage/v1/bucket.ts b/src/generated/yandex/cloud/storage/v1/bucket.ts new file mode 100644 index 00000000..7c274cdd --- /dev/null +++ b/src/generated/yandex/cloud/storage/v1/bucket.ts @@ -0,0 +1,3760 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { Struct } from "../../../../google/protobuf/struct"; +import { + BoolValue, + Int64Value, + StringValue, +} from "../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.storage.v1"; + +export enum Versioning { + VERSIONING_UNSPECIFIED = 0, + /** + * VERSIONING_DISABLED - The bucket is unversioned, i.e. versioning has never been enabled for the bucket, including at its creation. + * Objects that are stored in the bucket have a version ID of `null`. + * + * To enable versioning, change status to `VERSIONING_ENABLED` via a [BucketService.Update] request. Note that this + * action is irreversible, and a bucket with versioning enabled can never return to `VERSIONING_DISABLED` state. + */ + VERSIONING_DISABLED = 1, + /** + * VERSIONING_ENABLED - Bucket versioning is enabled, i.e. all new objects are versioned and given a unique version ID, and objects that + * already existed at the time versioning was enabled will be versioned and given a unique version ID when modified + * by future requests. + * + * To suspend versioning, change status to `VERSIONING_SUSPENDED` via a [BucketService.Update] request. You cannot + * disable versioning altogether for a bucket that already had it enabled; objects that had version IDs will keep + * them. + */ + VERSIONING_ENABLED = 2, + /** + * VERSIONING_SUSPENDED - Bucket versioning is suspended, i.e. new objects are not versioned, but objects that already existed at the time + * versioning was suspended are still versioned and keep their version IDs. + * + * To resume versioning, change status to `VERSIONING_ENABLED` via a [BucketService.Update] request. 
+ */ + VERSIONING_SUSPENDED = 3, + UNRECOGNIZED = -1, +} + +export function versioningFromJSON(object: any): Versioning { + switch (object) { + case 0: + case "VERSIONING_UNSPECIFIED": + return Versioning.VERSIONING_UNSPECIFIED; + case 1: + case "VERSIONING_DISABLED": + return Versioning.VERSIONING_DISABLED; + case 2: + case "VERSIONING_ENABLED": + return Versioning.VERSIONING_ENABLED; + case 3: + case "VERSIONING_SUSPENDED": + return Versioning.VERSIONING_SUSPENDED; + case -1: + case "UNRECOGNIZED": + default: + return Versioning.UNRECOGNIZED; + } +} + +export function versioningToJSON(object: Versioning): string { + switch (object) { + case Versioning.VERSIONING_UNSPECIFIED: + return "VERSIONING_UNSPECIFIED"; + case Versioning.VERSIONING_DISABLED: + return "VERSIONING_DISABLED"; + case Versioning.VERSIONING_ENABLED: + return "VERSIONING_ENABLED"; + case Versioning.VERSIONING_SUSPENDED: + return "VERSIONING_SUSPENDED"; + default: + return "UNKNOWN"; + } +} + +/** + * A bucket resource. + * For details about the concept, see [documentation](/docs/storage/concepts/bucket). + */ +export interface Bucket { + $type: "yandex.cloud.storage.v1.Bucket"; + /** ID of the bucket. Always equal to [name], which has priority. */ + id: string; + /** + * Name of the bucket. + * + * The name is unique within Yandex Cloud. For naming limitations and rules, see + * [documentation](/docs/storage/concepts/bucket#naming). + */ + name: string; + /** ID of the folder that the bucket belongs to. */ + folderId: string; + /** + * Flags for configuring public (anonymous) access to the bucket's content and settings. + * For details, see [documentation](/docs/storage/concepts/bucket#bucket-access). + */ + anonymousAccessFlags?: AnonymousAccessFlags; + /** + * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`) and + * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). + */ + defaultStorageClass: string; + /** + * Bucket versioning status. + * For details, see [documentation](/docs/storage/concepts/versioning). + */ + versioning: Versioning; + /** + * Maximum size of the bucket, in bytes. + * For details, see [documentation](/docs/storage/operations/buckets/limit-max-volume). + */ + maxSize: number; + /** + * Bucket policies that set permissions for actions with the bucket, its objects, and groups of objects. + * For details, see [documentation](/docs/storage/concepts/policy). + */ + policy?: { [key: string]: any }; + /** + * Access control list (ACL) of the bucket. + * For details, see [documentation](/docs/storage/concepts/acl). + */ + acl?: ACL; + /** Creation timestamp. */ + createdAt?: Date; + /** + * List of rules for cross-domain requests to objects in the bucket (cross-origin resource sharing, CORS). + * For details, see [documentation](/docs/storage/concepts/cors). + */ + cors: CorsRule[]; + /** + * Configuration for hosting a static website in the bucket. + * For details, see [documentation](/docs/storage/concepts/hosting). + */ + websiteSettings?: WebsiteSettings; + /** + * List of object lifecycle rules for the bucket. + * For details, see [documentation](/docs/storage/concepts/lifecycles). + */ + lifecycleRules: LifecycleRule[]; +} + +export interface ACL { + $type: "yandex.cloud.storage.v1.ACL"; + /** List of permissions granted and the grantees. */ + grants: ACL_Grant[]; +} + +/** A grant resource, used to specify the permission granted and the grantee. 
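// Editor's note: illustrative sketch only, not generator output. The new storage
// module (re-exported above as `bucket` / `bucket_service`, with a matching
// BucketServiceClient entry in service_clients.ts) models bucket versioning with
// the Versioning enum. The helpers below convert between the numeric enum and its
// JSON string form; note that, per the comments above, switching a bucket from
// VERSIONING_DISABLED to VERSIONING_ENABLED cannot be undone, only suspended.
const targetVersioning: Versioning = Versioning.VERSIONING_ENABLED;
console.log(versioningToJSON(targetVersioning));         // "VERSIONING_ENABLED"
console.log(versioningFromJSON("VERSIONING_SUSPENDED")); // Versioning.VERSIONING_SUSPENDED (3)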
*/ +export interface ACL_Grant { + $type: "yandex.cloud.storage.v1.ACL.Grant"; + /** Permission granted by the grant. */ + permission: ACL_Grant_Permission; + /** The grantee type for the grant. */ + grantType: ACL_Grant_GrantType; + /** ID of the Yandex Cloud user who is a grantee. Required when the [grant_type] is `GRANT_TYPE_ACCOUNT`. */ + granteeId: string; +} + +export enum ACL_Grant_Permission { + PERMISSION_UNSPECIFIED = 0, + /** + * PERMISSION_FULL_CONTROL - Allows grantee the `PERMISSION_WRITE`, `PERMISSION_WRITE_ACP`, `PERMISSION_READ`, and `PERMISSION_READ_ACP` + * on the bucket. + * + * Maps to `x-amz-grant-full-control` header for [bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of + * Amazon S3-compatible HTTP API. + */ + PERMISSION_FULL_CONTROL = 1, + /** + * PERMISSION_WRITE - Allows grantee to create new objects in the bucket. For the bucket and object owners of existing objects, also + * allows deletions and overwrites of those objects. + * + * Maps to `x-amz-grant-write` header for [bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of Amazon + * S3-compatible HTTP API. + */ + PERMISSION_WRITE = 2, + /** + * PERMISSION_WRITE_ACP - Allows grantee to write the ACL for the bucket. + * + * Maps to `x-amz-grant-write-acp` header for [bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of + * Amazon S3-compatible HTTP API. + */ + PERMISSION_WRITE_ACP = 3, + /** + * PERMISSION_READ - Allows grantee to list the objects in the bucket. + * + * Maps to `x-amz-grant-read` header for [bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of Amazon + * S3-compatible HTTP API. + */ + PERMISSION_READ = 4, + /** + * PERMISSION_READ_ACP - Allows grantee to read the bucket ACL + * + * Maps to `x-amz-grant-read-acp` header for [bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of + * Amazon S3-compatible HTTP API. + */ + PERMISSION_READ_ACP = 5, + UNRECOGNIZED = -1, +} + +export function aCL_Grant_PermissionFromJSON( + object: any +): ACL_Grant_Permission { + switch (object) { + case 0: + case "PERMISSION_UNSPECIFIED": + return ACL_Grant_Permission.PERMISSION_UNSPECIFIED; + case 1: + case "PERMISSION_FULL_CONTROL": + return ACL_Grant_Permission.PERMISSION_FULL_CONTROL; + case 2: + case "PERMISSION_WRITE": + return ACL_Grant_Permission.PERMISSION_WRITE; + case 3: + case "PERMISSION_WRITE_ACP": + return ACL_Grant_Permission.PERMISSION_WRITE_ACP; + case 4: + case "PERMISSION_READ": + return ACL_Grant_Permission.PERMISSION_READ; + case 5: + case "PERMISSION_READ_ACP": + return ACL_Grant_Permission.PERMISSION_READ_ACP; + case -1: + case "UNRECOGNIZED": + default: + return ACL_Grant_Permission.UNRECOGNIZED; + } +} + +export function aCL_Grant_PermissionToJSON( + object: ACL_Grant_Permission +): string { + switch (object) { + case ACL_Grant_Permission.PERMISSION_UNSPECIFIED: + return "PERMISSION_UNSPECIFIED"; + case ACL_Grant_Permission.PERMISSION_FULL_CONTROL: + return "PERMISSION_FULL_CONTROL"; + case ACL_Grant_Permission.PERMISSION_WRITE: + return "PERMISSION_WRITE"; + case ACL_Grant_Permission.PERMISSION_WRITE_ACP: + return "PERMISSION_WRITE_ACP"; + case ACL_Grant_Permission.PERMISSION_READ: + return "PERMISSION_READ"; + case ACL_Grant_Permission.PERMISSION_READ_ACP: + return "PERMISSION_READ_ACP"; + default: + return "UNKNOWN"; + } +} + +export enum ACL_Grant_GrantType { + GRANT_TYPE_UNSPECIFIED = 0, + /** + * GRANT_TYPE_ACCOUNT - A grantee is a [Yandex Cloud account](/docs/iam/concepts/#accounts). 
+ * + * For this grantee type, you need to specify the user ID in [Bucket.acl.grants.grantee_id] field. To get user ID, see + * [instruction](/docs/iam/operations/users/get). + * + * Maps to using `id="*"` value for `x-amz-grant-*` header ([bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) + * method of Amazon S3-compatible HTTP API). + */ + GRANT_TYPE_ACCOUNT = 1, + /** + * GRANT_TYPE_ALL_AUTHENTICATED_USERS - Grantees are all authenticated Yandex Cloud users, both from your clouds and other users' clouds. Access + * permission to this group allows any Yandex Cloud account to access the resource via a signed (authenticated) + * request. + * + * Maps to using `uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"` value for `x-amz-grant-*` + * header ([bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of Amazon S3-compatible HTTP API). + */ + GRANT_TYPE_ALL_AUTHENTICATED_USERS = 2, + /** + * GRANT_TYPE_ALL_USERS - Grantees are all internet users. Access permission to this group allows anyone in the world access to the + * resource via signed (authenticated) or unsigned (anonymous) requests. + * + * Maps to using `uri="http://acs.amazonaws.com/groups/global/AllUsers"` value for `x-amz-grant-*` header + * ([bucketPutAcl](/docs/storage/s3/api-ref/acl/bucketput) method of Amazon S3-compatible HTTP API). + */ + GRANT_TYPE_ALL_USERS = 3, + UNRECOGNIZED = -1, +} + +export function aCL_Grant_GrantTypeFromJSON(object: any): ACL_Grant_GrantType { + switch (object) { + case 0: + case "GRANT_TYPE_UNSPECIFIED": + return ACL_Grant_GrantType.GRANT_TYPE_UNSPECIFIED; + case 1: + case "GRANT_TYPE_ACCOUNT": + return ACL_Grant_GrantType.GRANT_TYPE_ACCOUNT; + case 2: + case "GRANT_TYPE_ALL_AUTHENTICATED_USERS": + return ACL_Grant_GrantType.GRANT_TYPE_ALL_AUTHENTICATED_USERS; + case 3: + case "GRANT_TYPE_ALL_USERS": + return ACL_Grant_GrantType.GRANT_TYPE_ALL_USERS; + case -1: + case "UNRECOGNIZED": + default: + return ACL_Grant_GrantType.UNRECOGNIZED; + } +} + +export function aCL_Grant_GrantTypeToJSON(object: ACL_Grant_GrantType): string { + switch (object) { + case ACL_Grant_GrantType.GRANT_TYPE_UNSPECIFIED: + return "GRANT_TYPE_UNSPECIFIED"; + case ACL_Grant_GrantType.GRANT_TYPE_ACCOUNT: + return "GRANT_TYPE_ACCOUNT"; + case ACL_Grant_GrantType.GRANT_TYPE_ALL_AUTHENTICATED_USERS: + return "GRANT_TYPE_ALL_AUTHENTICATED_USERS"; + case ACL_Grant_GrantType.GRANT_TYPE_ALL_USERS: + return "GRANT_TYPE_ALL_USERS"; + default: + return "UNKNOWN"; + } +} + +export interface AnonymousAccessFlags { + $type: "yandex.cloud.storage.v1.AnonymousAccessFlags"; + /** Specifies whether public (anonymous) access to read objects in the bucket is enabled. */ + read?: boolean; + /** Specifies whether public (anonymous) access to the list of objects in the bucket is enabled. */ + list?: boolean; + /** + * Specifies whether public (anonymous) access to read [CORS](/docs/storage/concepts/cors), + * [static website hosting](/docs/storage/concepts/hosting), and + * [object lifecycles](/docs/storage/concepts/lifecycles) settings of the bucket is enabled. + */ + configRead?: boolean; +} + +/** + * A CORS rule resource. + * For details about the concept, see [documentation](/docs/storage/concepts/cors). + */ +export interface CorsRule { + $type: "yandex.cloud.storage.v1.CorsRule"; + /** ID of the CORS rule. */ + id: string; + /** + * List of HTTP methods allowed by the CORS rule. 
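// Editor's note: illustrative sketch only, not generator output. An ACL is a list
// of grants, each pairing a permission with a grantee type (plus a grantee ID
// when the type is GRANT_TYPE_ACCOUNT). Using the enums defined above, the object
// below would describe public read access plus full control for one account; the
// account ID is a placeholder.
const exampleAcl: ACL = {
  $type: "yandex.cloud.storage.v1.ACL",
  grants: [
    {
      $type: "yandex.cloud.storage.v1.ACL.Grant",
      permission: ACL_Grant_Permission.PERMISSION_READ,
      grantType: ACL_Grant_GrantType.GRANT_TYPE_ALL_USERS,
      granteeId: "", // not used for group grantees
    },
    {
      $type: "yandex.cloud.storage.v1.ACL.Grant",
      permission: ACL_Grant_Permission.PERMISSION_FULL_CONTROL,
      grantType: ACL_Grant_GrantType.GRANT_TYPE_ACCOUNT,
      granteeId: "<user-id>", // placeholder Yandex Cloud user ID
    },
  ],
};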
+ * + * When a client sends a CORS-preflight `options` request with the `Access-Control-Request-Method` header (see + * [S3-compatible API reference](/docs/storage/s3/api-ref/object/options)), the specified method is checked against + * the list of the allowed methods. If there is a match, all the allowed methods are listed in the + * `Access-Control-Allow-Methods` header of the response. + */ + allowedMethods: CorsRule_Method[]; + /** + * List of HTTP headers allowed by the CORS rule. + * + * When a client sends a CORS-preflight `options` request with the `Access-Control-Request-Headers` header (see + * [S3-compatible API reference](/docs/storage/s3/api-ref/object/options)), the specified headers are checked against + * the list of the allowed headers. If there is a match, the specified headers that are allowed are listed in the + * `Access-Control-Allow-Headers` header of the response. + * + * Each string in the list can contain at most one `*` wildcard character that matches 0 or more characters. + * For example, `x-amz-*` value will allow all Amazon S3-compatible headers. + */ + allowedHeaders: string[]; + /** + * List of request origins allowed by the CORS rule. + * + * Each string in the list can contain at most one `*` wildcard character that matches 0 or more characters. + * For example, `http://*.example.com` value will allow requests originating from all subdomains of `example.com`. + */ + allowedOrigins: string[]; + /** List of headers contained in responses to CORS requests that can be accessed by applications. */ + exposeHeaders: string[]; + /** + * Time in seconds that a client can cache the response to a CORS-preflight request as identified by the + * object requested, the HTTP method, and the origin. + */ + maxAgeSeconds?: number; +} + +/** + * List of HTTP methods that are allowed by the CORS rule. + * + * When a client sends a CORS-preflight `options` request with the `Access-Control-Request-Method` header (see + * S3-compatible API reference](/docs/storage/s3/api-ref/object/options)), the specified method is checked against the + * list of the allowed methods. If there is a match, all the allowed methods are listed in the + * `Access-Control-Allow-Methods` header of the response. + */ +export enum CorsRule_Method { + METHOD_UNSPECIFIED = 0, + /** METHOD_GET - HTTP `GET` method. */ + METHOD_GET = 1, + /** METHOD_HEAD - HTTP `HEAD` method. */ + METHOD_HEAD = 2, + /** METHOD_POST - HTTP `POST` method. */ + METHOD_POST = 3, + /** METHOD_PUT - HTTP `PUT` method. */ + METHOD_PUT = 4, + /** METHOD_DELETE - HTTP `DELETE` method. 
*/ + METHOD_DELETE = 5, + UNRECOGNIZED = -1, +} + +export function corsRule_MethodFromJSON(object: any): CorsRule_Method { + switch (object) { + case 0: + case "METHOD_UNSPECIFIED": + return CorsRule_Method.METHOD_UNSPECIFIED; + case 1: + case "METHOD_GET": + return CorsRule_Method.METHOD_GET; + case 2: + case "METHOD_HEAD": + return CorsRule_Method.METHOD_HEAD; + case 3: + case "METHOD_POST": + return CorsRule_Method.METHOD_POST; + case 4: + case "METHOD_PUT": + return CorsRule_Method.METHOD_PUT; + case 5: + case "METHOD_DELETE": + return CorsRule_Method.METHOD_DELETE; + case -1: + case "UNRECOGNIZED": + default: + return CorsRule_Method.UNRECOGNIZED; + } +} + +export function corsRule_MethodToJSON(object: CorsRule_Method): string { + switch (object) { + case CorsRule_Method.METHOD_UNSPECIFIED: + return "METHOD_UNSPECIFIED"; + case CorsRule_Method.METHOD_GET: + return "METHOD_GET"; + case CorsRule_Method.METHOD_HEAD: + return "METHOD_HEAD"; + case CorsRule_Method.METHOD_POST: + return "METHOD_POST"; + case CorsRule_Method.METHOD_PUT: + return "METHOD_PUT"; + case CorsRule_Method.METHOD_DELETE: + return "METHOD_DELETE"; + default: + return "UNKNOWN"; + } +} + +export interface WebsiteSettings { + $type: "yandex.cloud.storage.v1.WebsiteSettings"; + /** + * Key of the index page object that is returned when a response is made to the root of the website. + * + * Either [index] or [redirect_all_requests] must be specified in order for the bucket to host a static website. + * + * If specified, the index page object must be located in the root of the bucket. + */ + index: string; + /** Key of the error page object that is returned when an error occurs. */ + error: string; + /** + * Configuration for redirecting all requests sent to the website. + * + * Either [redirect_all_requests] or [index] must be specified in order for the bucket to host a static website. + * If [redirect_all_requests] is specified, it must be the only field in [Bucket.website_settings]. + */ + redirectAllRequests?: WebsiteSettings_Scheme; + /** List of redirect rules. */ + routingRules: WebsiteSettings_RoutingRule[]; +} + +export enum WebsiteSettings_Protocol { + PROTOCOL_UNSPECIFIED = 0, + /** PROTOCOL_HTTP - `http` scheme. */ + PROTOCOL_HTTP = 1, + /** PROTOCOL_HTTPS - `https` scheme. */ + PROTOCOL_HTTPS = 2, + UNRECOGNIZED = -1, +} + +export function websiteSettings_ProtocolFromJSON( + object: any +): WebsiteSettings_Protocol { + switch (object) { + case 0: + case "PROTOCOL_UNSPECIFIED": + return WebsiteSettings_Protocol.PROTOCOL_UNSPECIFIED; + case 1: + case "PROTOCOL_HTTP": + return WebsiteSettings_Protocol.PROTOCOL_HTTP; + case 2: + case "PROTOCOL_HTTPS": + return WebsiteSettings_Protocol.PROTOCOL_HTTPS; + case -1: + case "UNRECOGNIZED": + default: + return WebsiteSettings_Protocol.UNRECOGNIZED; + } +} + +export function websiteSettings_ProtocolToJSON( + object: WebsiteSettings_Protocol +): string { + switch (object) { + case WebsiteSettings_Protocol.PROTOCOL_UNSPECIFIED: + return "PROTOCOL_UNSPECIFIED"; + case WebsiteSettings_Protocol.PROTOCOL_HTTP: + return "PROTOCOL_HTTP"; + case WebsiteSettings_Protocol.PROTOCOL_HTTPS: + return "PROTOCOL_HTTPS"; + default: + return "UNKNOWN"; + } +} + +/** A configuration resource for redirecting all requests sent to the website. */ +export interface WebsiteSettings_Scheme { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Scheme"; + /** Scheme of the redirect URI. */ + protocol: WebsiteSettings_Protocol; + /** Hostname of the redirect URI. 
*/ + hostname: string; +} + +export interface WebsiteSettings_Condition { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Condition"; + /** HTTP status code (number only) that must match for the redirect to apply. */ + httpErrorCodeReturnedEquals: string; + /** Prefix of the object key from which requests are redirected. */ + keyPrefixEquals: string; +} + +export interface WebsiteSettings_Redirect { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Redirect"; + /** Hostname of the redirect URI. */ + hostname: string; + /** + * HTTP status code of the redirect response. + * + * Default value: `"301"`. + */ + httpRedirectCode: string; + /** Scheme of the redirect URI. */ + protocol: WebsiteSettings_Protocol; + /** + * Substitution for the prefix of the object key specified in [Condition.key_prefix_equals]. + * + * At most one of [replace_key_prefix_with] and [replace_key_with] can be specified. + */ + replaceKeyPrefixWith: string; + /** + * New object key. + * + * At most one of [replace_key_with] and [replace_key_prefix_with] can be specified. + */ + replaceKeyWith: string; +} + +/** List of redirect rules. */ +export interface WebsiteSettings_RoutingRule { + $type: "yandex.cloud.storage.v1.WebsiteSettings.RoutingRule"; + /** Redirect condition. */ + condition?: WebsiteSettings_Condition; + /** Redirect instructions. */ + redirect?: WebsiteSettings_Redirect; +} + +/** + * An object lifecycle rule resource for the bucket. + * For details about the concept, see [documentation](/docs/storage/concepts/lifecycles). + */ +export interface LifecycleRule { + $type: "yandex.cloud.storage.v1.LifecycleRule"; + /** ID of the rule. Provided by the client or generated at creation time. */ + id?: string; + /** Indicates whether the rule is in effect. */ + enabled: boolean; + /** + * Filter that identifies the objects to which the rule applies. + * + * If not specified, the rule applies to all objects in the bucket. + */ + filter?: LifecycleRule_RuleFilter; + /** + * Expiration rule. + * + * The expiration of an object is described as follows. + * + * For the unversioned bucket ([Bucket.versioning] is `VERSIONING_DISABLED`), the object is deleted and cannot be + * recovered. + * + * For the bucket with versioning enabled ([Bucket.versioning] is `VERSIONING_ENABLED`), the current version of the + * object (if it exists and is not a delete marker) is retained as a non-current version, and a delete marker becomes + * the current version of the object. + * + * For the bucket with versioning suspended ([Bucket.versioning] is `VERSIONING_SUSPENDED`), the current version of + * the object is retained as a non-current version if it is not a delete marker, or is removed otherwise, and a + * delete marker becomes the current version of the object. + */ + expiration?: LifecycleRule_Expiration; + /** + * List of transition rules. + * + * The transition of an object is described as follows. + * + * For the unversioned bucket ([Bucket.versioning] is `VERSIONING_DISABLED`), the object is transitioned to the + * specified storage class. + * + * For the bucket with versioning enabled ([Bucket.versioning] is `VERSIONING_ENABLED`) or suspended + * (`VERSIONING_SUSPENDED`), the current version of the object is transitioned to the specified storage class. + */ + transitions: LifecycleRule_Transition[]; + /** Configuration for aborting incomplete [multipart uploads](/docs/storage/concepts/multipart). 
*/ + abortIncompleteMultipartUpload?: LifecycleRule_AfterDays; + /** + * Expiration rule for non-current versions of objects in a bucket with versioning enabled ([Bucket.versioning] is + * `VERSIONING_ENABLED`) or suspended (`VERSIONING_SUSPENDED`). + * + * At expiration, the non-current version of the object is deleted and cannot be recovered. + */ + noncurrentExpiration?: LifecycleRule_NoncurrentExpiration; + /** + * List of transition rules for non-current versions of objects in a bucket with versioning enabled + * ([Bucket.versioning] is `VERSIONING_ENABLED`) or suspended (`VERSIONING_SUSPENDED`). + * + * At transition, the non-current version of the object is transitioned to the specified storage class. + */ + noncurrentTransitions: LifecycleRule_NoncurrentTransition[]; +} + +export interface LifecycleRule_AfterDays { + $type: "yandex.cloud.storage.v1.LifecycleRule.AfterDays"; + /** + * Time period, in number of days from the start of the multipart upload, after which the incomplete upload is + * aborted. + */ + daysAfterExpiration?: number; +} + +export interface LifecycleRule_NoncurrentExpiration { + $type: "yandex.cloud.storage.v1.LifecycleRule.NoncurrentExpiration"; + /** + * Time period, in number of days since the version of an object was classified as non-current, after which the + * version expires. + */ + noncurrentDays?: number; +} + +/** + * List of transition rules for non-current versions of objects in a bucket with versioning enabled + * ([Bucket.versioning] is `VERSIONING_ENABLED`) or suspended (`VERSIONING_SUSPENDED`). + * + * At transition, the non-current version of the object is transitioned to the specified storage class. + */ +export interface LifecycleRule_NoncurrentTransition { + $type: "yandex.cloud.storage.v1.LifecycleRule.NoncurrentTransition"; + /** + * Time period, in number of days since the version of an object was classified as non-current, after which the + * version is transitioned. + */ + noncurrentDays?: number; + /** + * Storage class to which a non-current version of an object is transitioned. + * + * The only supported class is cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). Transitions from cold + * to standard storage are not allowed. + */ + storageClass: string; +} + +/** + * List of transition rules. + * + * The transition of an object is described as follows. + * + * For the unversioned bucket ([Bucket.versioning] is `VERSIONING_DISABLED`), the object is transitioned to the + * specified storage class. + * + * For the bucket with versioning enabled ([Bucket.versioning] is `VERSIONING_ENABLED`) or suspended + * (`VERSIONING_SUSPENDED`), the current version of the object is transitioned to the specified storage class. + */ +export interface LifecycleRule_Transition { + $type: "yandex.cloud.storage.v1.LifecycleRule.Transition"; + /** + * Specific date of object transition. + * + * The rule continues to apply even after the date has passed, i.e. any new objects created in the bucket are + * transitioned immediately. + * + * At most one of [date] and [days] fields can be specified. + */ + date?: Date; + /** + * Time period, in number of days from the creation or modification of the object, after which an object is + * transitioned. + * + * At most one of [days] and [date] fields can be specified. + */ + days?: number; + /** + * Storage class to which an object is transitioned. + * + * The only supported class is cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). 
Transitions from cold + * to standard storage are not allowed. + */ + storageClass: string; +} + +export interface LifecycleRule_Expiration { + $type: "yandex.cloud.storage.v1.LifecycleRule.Expiration"; + /** + * Specific date of object expiration. + * + * The rule continues to apply even after the date has passed, i.e. any new objects created in the bucket expire + * immediately. + * + * Exactly one of [date], [days], and [expired_object_delete_marker] fields can be specified. + */ + date?: Date; + /** + * Time period, in number of days from the creation or modification of the object, after which an object expires. + * + * Exactly one of [days], [date], and [expired_object_delete_marker] fields can be specified. + */ + days?: number; + /** + * Indicates whether a delete marker of an object with no non-current versions (referred to as an expired object + * delete marker) is removed at the object's expiration. + * + * Exactly one of [expired_object_delete_marker], [date], and [days] fields can be specified. + */ + expiredObjectDeleteMarker?: boolean; +} + +export interface LifecycleRule_RuleFilter { + $type: "yandex.cloud.storage.v1.LifecycleRule.RuleFilter"; + /** Key prefix that the object must have in order for the rule to apply. */ + prefix: string; +} + +export interface Counters { + $type: "yandex.cloud.storage.v1.Counters"; + /** Total size of objects uploaded in single operation, in bytes. */ + simpleObjectSize: number; + /** Number of objects uploaded in single operation. */ + simpleObjectCount: number; + /** Total size of uploaded parts in incomplete multipart uploads, in bytes. */ + objectsPartsSize: number; + /** Number of uploaded parts in incomplete multipart uploads. */ + objectsPartsCount: number; + /** Total size of objects uploaded in multiple parts, in bytes. */ + multipartObjectsSize: number; + /** Number of objects uploaded in multiple parts. */ + multipartObjectsCount: number; + /** Number of incomplete multipart uploads. */ + activeMultipartCount: number; +} + +/** A resource for size of available space in a bucket for a storage class. */ +export interface OptionalSizeByClass { + $type: "yandex.cloud.storage.v1.OptionalSizeByClass"; + /** + * Storage class. Supported classes are standard storage (`STANDARD`) and cold storage (`COLD`, `STANDARD_IA`, + * `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). + */ + storageClass: string; + /** Size of available space in the bucket for the storage class. */ + classSize?: number; +} + +/** A resource for size of used space in a bucket for a storage class. */ +export interface SizeByClass { + $type: "yandex.cloud.storage.v1.SizeByClass"; + /** + * Storage class. Supported classes are standard storage (`STANDARD`) and cold storage (`COLD`, `STANDARD_IA`, + * `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). + */ + storageClass: string; + /** Size of used space in the bucket for the storage class. */ + classSize: number; +} + +/** A resource for object-related statistics for a storage class by type of upload (simple vs. multipart). */ +export interface CountersByClass { + $type: "yandex.cloud.storage.v1.CountersByClass"; + /** + * Storage class. Supported classes are standard storage (`STANDARD`) and cold storage (`COLD`, `STANDARD_IA`, + * `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). 
+ */ + storageClass: string; + /** Object-related statistics for the storage class by type of upload. */ + counters?: Counters; +} + +/** A bucket statistics resource. */ +export interface BucketStats { + $type: "yandex.cloud.storage.v1.BucketStats"; + /** Name of the bucket. */ + name: string; + /** Maximum size of the bucket, in bytes. */ + maxSize?: number; + /** Size of used space in the bucket, in bytes. */ + usedSize: number; + /** Size of available space in the bucket by storage class, in bytes. */ + storageClassMaxSizes: OptionalSizeByClass[]; + /** Size of used space in the bucket by storage class, in bytes. */ + storageClassUsedSizes: SizeByClass[]; + /** Object-related statistics by storage class and type of upload (simple vs. multipart), in bytes. */ + storageClassCounters: CountersByClass[]; + /** + * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`) and + * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). + */ + defaultStorageClass?: string; + /** + * Flags for configuring public (anonymous) access to the bucket's content and settings. + * For details, see [documentation](/docs/storage/concepts/bucket#bucket-access). + */ + anonymousAccessFlags?: AnonymousAccessFlags; + /** Bucket creation timestamp. */ + createdAt?: Date; + /** Bucket latest update timestamp. */ + updatedAt?: Date; +} + +/** A resource for HTTPS configuration of a bucket. */ +export interface HTTPSConfig { + $type: "yandex.cloud.storage.v1.HTTPSConfig"; + /** Name of the bucket. */ + name: string; + /** Type of TLS certificate source. */ + sourceType: HTTPSConfig_SourceType; + /** Issuer of the TLS certificate. */ + issuer?: string; + /** Subject of the TLS certificate. */ + subject?: string; + /** List of DNS names of the TLS certificate (Subject Alternative Name field). */ + dnsNames: string[]; + /** Start of the TLS certificate validity period (Not Before field). */ + notBefore?: Date; + /** End of the TLS certificate validity period (Not After field) */ + notAfter?: Date; + /** + * ID of the TLS certificate in Yandex Certificate Manager. + * + * To get information about the certificate from Certificate Manager, make a + * [yandex.cloud.certificatemanager.v1.CertificateService.Get] request. + */ + certificateId: string; +} + +/** A resource for type of TLS certificate source. */ +export enum HTTPSConfig_SourceType { + SOURCE_TYPE_UNSPECIFIED = 0, + /** SOURCE_TYPE_SELF_MANAGED - Your certificate, uploaded directly. */ + SOURCE_TYPE_SELF_MANAGED = 1, + /** SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER - Certificate managed by Yandex Certificate Manager. 
*/ + SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER = 2, + UNRECOGNIZED = -1, +} + +export function hTTPSConfig_SourceTypeFromJSON( + object: any +): HTTPSConfig_SourceType { + switch (object) { + case 0: + case "SOURCE_TYPE_UNSPECIFIED": + return HTTPSConfig_SourceType.SOURCE_TYPE_UNSPECIFIED; + case 1: + case "SOURCE_TYPE_SELF_MANAGED": + return HTTPSConfig_SourceType.SOURCE_TYPE_SELF_MANAGED; + case 2: + case "SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER": + return HTTPSConfig_SourceType.SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER; + case -1: + case "UNRECOGNIZED": + default: + return HTTPSConfig_SourceType.UNRECOGNIZED; + } +} + +export function hTTPSConfig_SourceTypeToJSON( + object: HTTPSConfig_SourceType +): string { + switch (object) { + case HTTPSConfig_SourceType.SOURCE_TYPE_UNSPECIFIED: + return "SOURCE_TYPE_UNSPECIFIED"; + case HTTPSConfig_SourceType.SOURCE_TYPE_SELF_MANAGED: + return "SOURCE_TYPE_SELF_MANAGED"; + case HTTPSConfig_SourceType.SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER: + return "SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER"; + default: + return "UNKNOWN"; + } +} + +const baseBucket: object = { + $type: "yandex.cloud.storage.v1.Bucket", + id: "", + name: "", + folderId: "", + defaultStorageClass: "", + versioning: 0, + maxSize: 0, +}; + +export const Bucket = { + $type: "yandex.cloud.storage.v1.Bucket" as const, + + encode( + message: Bucket, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.folderId !== "") { + writer.uint32(26).string(message.folderId); + } + if (message.anonymousAccessFlags !== undefined) { + AnonymousAccessFlags.encode( + message.anonymousAccessFlags, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.defaultStorageClass !== "") { + writer.uint32(42).string(message.defaultStorageClass); + } + if (message.versioning !== 0) { + writer.uint32(48).int32(message.versioning); + } + if (message.maxSize !== 0) { + writer.uint32(56).int64(message.maxSize); + } + if (message.policy !== undefined) { + Struct.encode( + Struct.wrap(message.policy), + writer.uint32(66).fork() + ).ldelim(); + } + if (message.acl !== undefined) { + ACL.encode(message.acl, writer.uint32(74).fork()).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(82).fork() + ).ldelim(); + } + for (const v of message.cors) { + CorsRule.encode(v!, writer.uint32(90).fork()).ldelim(); + } + if (message.websiteSettings !== undefined) { + WebsiteSettings.encode( + message.websiteSettings, + writer.uint32(98).fork() + ).ldelim(); + } + for (const v of message.lifecycleRules) { + LifecycleRule.encode(v!, writer.uint32(106).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Bucket { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
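/*
A minimal usage sketch (values are illustrative) for the enum helpers generated above:
they map between the protobuf wire form (numbers or string names) and the TypeScript
enum, falling back to UNRECOGNIZED for anything unknown.

const sourceType = hTTPSConfig_SourceTypeFromJSON(
  "SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER"
); // -> HTTPSConfig_SourceType.SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER
hTTPSConfig_SourceTypeToJSON(sourceType); // -> "SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER"
hTTPSConfig_SourceTypeFromJSON(42); // -> HTTPSConfig_SourceType.UNRECOGNIZED
*/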
reader.len : reader.pos + length; + const message = { ...baseBucket } as Bucket; + message.cors = []; + message.lifecycleRules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 4: + message.anonymousAccessFlags = AnonymousAccessFlags.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.defaultStorageClass = reader.string(); + break; + case 6: + message.versioning = reader.int32() as any; + break; + case 7: + message.maxSize = longToNumber(reader.int64() as Long); + break; + case 8: + message.policy = Struct.unwrap( + Struct.decode(reader, reader.uint32()) + ); + break; + case 9: + message.acl = ACL.decode(reader, reader.uint32()); + break; + case 10: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 11: + message.cors.push(CorsRule.decode(reader, reader.uint32())); + break; + case 12: + message.websiteSettings = WebsiteSettings.decode( + reader, + reader.uint32() + ); + break; + case 13: + message.lifecycleRules.push( + LifecycleRule.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Bucket { + const message = { ...baseBucket } as Bucket; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? AnonymousAccessFlags.fromJSON(object.anonymousAccessFlags) + : undefined; + message.defaultStorageClass = + object.defaultStorageClass !== undefined && + object.defaultStorageClass !== null + ? String(object.defaultStorageClass) + : ""; + message.versioning = + object.versioning !== undefined && object.versioning !== null + ? versioningFromJSON(object.versioning) + : 0; + message.maxSize = + object.maxSize !== undefined && object.maxSize !== null + ? Number(object.maxSize) + : 0; + message.policy = + typeof object.policy === "object" ? object.policy : undefined; + message.acl = + object.acl !== undefined && object.acl !== null + ? ACL.fromJSON(object.acl) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.cors = (object.cors ?? []).map((e: any) => CorsRule.fromJSON(e)); + message.websiteSettings = + object.websiteSettings !== undefined && object.websiteSettings !== null + ? WebsiteSettings.fromJSON(object.websiteSettings) + : undefined; + message.lifecycleRules = (object.lifecycleRules ?? []).map((e: any) => + LifecycleRule.fromJSON(e) + ); + return message; + }, + + toJSON(message: Bucket): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.name !== undefined && (obj.name = message.name); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.anonymousAccessFlags !== undefined && + (obj.anonymousAccessFlags = message.anonymousAccessFlags + ? 
AnonymousAccessFlags.toJSON(message.anonymousAccessFlags)
+        : undefined);
+    message.defaultStorageClass !== undefined &&
+      (obj.defaultStorageClass = message.defaultStorageClass);
+    message.versioning !== undefined &&
+      (obj.versioning = versioningToJSON(message.versioning));
+    message.maxSize !== undefined &&
+      (obj.maxSize = Math.round(message.maxSize));
+    message.policy !== undefined && (obj.policy = message.policy);
+    message.acl !== undefined &&
+      (obj.acl = message.acl ? ACL.toJSON(message.acl) : undefined);
+    message.createdAt !== undefined &&
+      (obj.createdAt = message.createdAt.toISOString());
+    if (message.cors) {
+      obj.cors = message.cors.map((e) => (e ? CorsRule.toJSON(e) : undefined));
+    } else {
+      obj.cors = [];
+    }
+    message.websiteSettings !== undefined &&
+      (obj.websiteSettings = message.websiteSettings
+        ? WebsiteSettings.toJSON(message.websiteSettings)
+        : undefined);
+    if (message.lifecycleRules) {
+      obj.lifecycleRules = message.lifecycleRules.map((e) =>
+        e ? LifecycleRule.toJSON(e) : undefined
+      );
+    } else {
+      obj.lifecycleRules = [];
+    }
+    return obj;
+  },
+
+  fromPartial<I extends Exact<DeepPartial<Bucket>, I>>(object: I): Bucket {
+    const message = { ...baseBucket } as Bucket;
+    message.id = object.id ?? "";
+    message.name = object.name ?? "";
+    message.folderId = object.folderId ?? "";
+    message.anonymousAccessFlags =
+      object.anonymousAccessFlags !== undefined &&
+      object.anonymousAccessFlags !== null
+        ? AnonymousAccessFlags.fromPartial(object.anonymousAccessFlags)
+        : undefined;
+    message.defaultStorageClass = object.defaultStorageClass ?? "";
+    message.versioning = object.versioning ?? 0;
+    message.maxSize = object.maxSize ?? 0;
+    message.policy = object.policy ?? undefined;
+    message.acl =
+      object.acl !== undefined && object.acl !== null
+        ? ACL.fromPartial(object.acl)
+        : undefined;
+    message.createdAt = object.createdAt ?? undefined;
+    message.cors = object.cors?.map((e) => CorsRule.fromPartial(e)) || [];
+    message.websiteSettings =
+      object.websiteSettings !== undefined && object.websiteSettings !== null
+        ? WebsiteSettings.fromPartial(object.websiteSettings)
+        : undefined;
+    message.lifecycleRules =
+      object.lifecycleRules?.map((e) => LifecycleRule.fromPartial(e)) || [];
+    return message;
+  },
+};
+
+messageTypeRegistry.set(Bucket.$type, Bucket);
+
+const baseACL: object = { $type: "yandex.cloud.storage.v1.ACL" };
+
+export const ACL = {
+  $type: "yandex.cloud.storage.v1.ACL" as const,
+
+  encode(message: ACL, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer {
+    for (const v of message.grants) {
+      ACL_Grant.encode(v!, writer.uint32(10).fork()).ldelim();
+    }
+    return writer;
+  },
+
+  decode(input: _m0.Reader | Uint8Array, length?: number): ACL {
+    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = { ...baseACL } as ACL;
+    message.grants = [];
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          message.grants.push(ACL_Grant.decode(reader, reader.uint32()));
+          break;
+        default:
+          reader.skipType(tag & 7);
+          break;
+      }
+    }
+    return message;
+  },
+
+  fromJSON(object: any): ACL {
+    const message = { ...baseACL } as ACL;
+    message.grants = (object.grants ?? []).map((e: any) =>
+      ACL_Grant.fromJSON(e)
+    );
+    return message;
+  },
+
+  toJSON(message: ACL): unknown {
+    const obj: any = {};
+    if (message.grants) {
+      obj.grants = message.grants.map((e) =>
+        e ?
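/*
A minimal usage sketch for the Bucket codec defined above (bucket name and folder ID
below are placeholders): fromPartial fills protobuf defaults for omitted fields, and
encode/decode round-trip the message through its binary form.

const bucket = Bucket.fromPartial({
  name: "example-bucket",      // placeholder
  folderId: "b1gexample",      // placeholder
  defaultStorageClass: "STANDARD",
  maxSize: 1073741824,         // 1 GiB
});
const bytes: Uint8Array = Bucket.encode(bucket).finish();
const restored: Bucket = Bucket.decode(bytes); // restored.name === "example-bucket"
*/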
ACL_Grant.toJSON(e) : undefined + ); + } else { + obj.grants = []; + } + return obj; + }, + + fromPartial, I>>(object: I): ACL { + const message = { ...baseACL } as ACL; + message.grants = object.grants?.map((e) => ACL_Grant.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ACL.$type, ACL); + +const baseACL_Grant: object = { + $type: "yandex.cloud.storage.v1.ACL.Grant", + permission: 0, + grantType: 0, + granteeId: "", +}; + +export const ACL_Grant = { + $type: "yandex.cloud.storage.v1.ACL.Grant" as const, + + encode( + message: ACL_Grant, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.permission !== 0) { + writer.uint32(8).int32(message.permission); + } + if (message.grantType !== 0) { + writer.uint32(16).int32(message.grantType); + } + if (message.granteeId !== "") { + writer.uint32(26).string(message.granteeId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ACL_Grant { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseACL_Grant } as ACL_Grant; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.permission = reader.int32() as any; + break; + case 2: + message.grantType = reader.int32() as any; + break; + case 3: + message.granteeId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ACL_Grant { + const message = { ...baseACL_Grant } as ACL_Grant; + message.permission = + object.permission !== undefined && object.permission !== null + ? aCL_Grant_PermissionFromJSON(object.permission) + : 0; + message.grantType = + object.grantType !== undefined && object.grantType !== null + ? aCL_Grant_GrantTypeFromJSON(object.grantType) + : 0; + message.granteeId = + object.granteeId !== undefined && object.granteeId !== null + ? String(object.granteeId) + : ""; + return message; + }, + + toJSON(message: ACL_Grant): unknown { + const obj: any = {}; + message.permission !== undefined && + (obj.permission = aCL_Grant_PermissionToJSON(message.permission)); + message.grantType !== undefined && + (obj.grantType = aCL_Grant_GrantTypeToJSON(message.grantType)); + message.granteeId !== undefined && (obj.granteeId = message.granteeId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ACL_Grant { + const message = { ...baseACL_Grant } as ACL_Grant; + message.permission = object.permission ?? 0; + message.grantType = object.grantType ?? 0; + message.granteeId = object.granteeId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ACL_Grant.$type, ACL_Grant); + +const baseAnonymousAccessFlags: object = { + $type: "yandex.cloud.storage.v1.AnonymousAccessFlags", +}; + +export const AnonymousAccessFlags = { + $type: "yandex.cloud.storage.v1.AnonymousAccessFlags" as const, + + encode( + message: AnonymousAccessFlags, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.read !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.read! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.list !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.list! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.configRead !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.configRead! 
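/*
A minimal sketch of building an ACL with the types above, assuming the
ACL_Grant_Permission enum generated earlier in this file exposes a PERMISSION_READ
member: a GRANT_TYPE_ALL_USERS grant needs no granteeId, matching the field comments above.

const acl = ACL.fromPartial({
  grants: [
    {
      permission: ACL_Grant_Permission.PERMISSION_READ, // assumed member name
      grantType: ACL_Grant_GrantType.GRANT_TYPE_ALL_USERS,
    },
  ],
});
const aclJson = ACL.toJSON(acl);
*/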
}, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AnonymousAccessFlags { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAnonymousAccessFlags } as AnonymousAccessFlags; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.read = BoolValue.decode(reader, reader.uint32()).value; + break; + case 2: + message.list = BoolValue.decode(reader, reader.uint32()).value; + break; + case 3: + message.configRead = BoolValue.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AnonymousAccessFlags { + const message = { ...baseAnonymousAccessFlags } as AnonymousAccessFlags; + message.read = + object.read !== undefined && object.read !== null + ? Boolean(object.read) + : undefined; + message.list = + object.list !== undefined && object.list !== null + ? Boolean(object.list) + : undefined; + message.configRead = + object.configRead !== undefined && object.configRead !== null + ? Boolean(object.configRead) + : undefined; + return message; + }, + + toJSON(message: AnonymousAccessFlags): unknown { + const obj: any = {}; + message.read !== undefined && (obj.read = message.read); + message.list !== undefined && (obj.list = message.list); + message.configRead !== undefined && (obj.configRead = message.configRead); + return obj; + }, + + fromPartial, I>>( + object: I + ): AnonymousAccessFlags { + const message = { ...baseAnonymousAccessFlags } as AnonymousAccessFlags; + message.read = object.read ?? undefined; + message.list = object.list ?? undefined; + message.configRead = object.configRead ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(AnonymousAccessFlags.$type, AnonymousAccessFlags); + +const baseCorsRule: object = { + $type: "yandex.cloud.storage.v1.CorsRule", + id: "", + allowedMethods: 0, + allowedHeaders: "", + allowedOrigins: "", + exposeHeaders: "", +}; + +export const CorsRule = { + $type: "yandex.cloud.storage.v1.CorsRule" as const, + + encode( + message: CorsRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + writer.uint32(18).fork(); + for (const v of message.allowedMethods) { + writer.int32(v); + } + writer.ldelim(); + for (const v of message.allowedHeaders) { + writer.uint32(26).string(v!); + } + for (const v of message.allowedOrigins) { + writer.uint32(34).string(v!); + } + for (const v of message.exposeHeaders) { + writer.uint32(42).string(v!); + } + if (message.maxAgeSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxAgeSeconds! }, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CorsRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
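/*
A minimal sketch for AnonymousAccessFlags: each flag is a wrapped
google.protobuf.BoolValue, so leaving a field undefined means "not set"
rather than false.

const flags = AnonymousAccessFlags.fromPartial({ read: true, list: false });
// configRead stays undefined (not set)
AnonymousAccessFlags.toJSON(flags); // -> { read: true, list: false }
*/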
reader.len : reader.pos + length; + const message = { ...baseCorsRule } as CorsRule; + message.allowedMethods = []; + message.allowedHeaders = []; + message.allowedOrigins = []; + message.exposeHeaders = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.allowedMethods.push(reader.int32() as any); + } + } else { + message.allowedMethods.push(reader.int32() as any); + } + break; + case 3: + message.allowedHeaders.push(reader.string()); + break; + case 4: + message.allowedOrigins.push(reader.string()); + break; + case 5: + message.exposeHeaders.push(reader.string()); + break; + case 6: + message.maxAgeSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CorsRule { + const message = { ...baseCorsRule } as CorsRule; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.allowedMethods = (object.allowedMethods ?? []).map((e: any) => + corsRule_MethodFromJSON(e) + ); + message.allowedHeaders = (object.allowedHeaders ?? []).map((e: any) => + String(e) + ); + message.allowedOrigins = (object.allowedOrigins ?? []).map((e: any) => + String(e) + ); + message.exposeHeaders = (object.exposeHeaders ?? []).map((e: any) => + String(e) + ); + message.maxAgeSeconds = + object.maxAgeSeconds !== undefined && object.maxAgeSeconds !== null + ? Number(object.maxAgeSeconds) + : undefined; + return message; + }, + + toJSON(message: CorsRule): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + if (message.allowedMethods) { + obj.allowedMethods = message.allowedMethods.map((e) => + corsRule_MethodToJSON(e) + ); + } else { + obj.allowedMethods = []; + } + if (message.allowedHeaders) { + obj.allowedHeaders = message.allowedHeaders.map((e) => e); + } else { + obj.allowedHeaders = []; + } + if (message.allowedOrigins) { + obj.allowedOrigins = message.allowedOrigins.map((e) => e); + } else { + obj.allowedOrigins = []; + } + if (message.exposeHeaders) { + obj.exposeHeaders = message.exposeHeaders.map((e) => e); + } else { + obj.exposeHeaders = []; + } + message.maxAgeSeconds !== undefined && + (obj.maxAgeSeconds = message.maxAgeSeconds); + return obj; + }, + + fromPartial, I>>(object: I): CorsRule { + const message = { ...baseCorsRule } as CorsRule; + message.id = object.id ?? ""; + message.allowedMethods = object.allowedMethods?.map((e) => e) || []; + message.allowedHeaders = object.allowedHeaders?.map((e) => e) || []; + message.allowedOrigins = object.allowedOrigins?.map((e) => e) || []; + message.exposeHeaders = object.exposeHeaders?.map((e) => e) || []; + message.maxAgeSeconds = object.maxAgeSeconds ?? 
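/*
A minimal sketch for CorsRule (rule ID and origin are placeholders): allow GET and
HEAD requests from any subdomain of example.com, with preflight responses cacheable
for an hour.

const cors = CorsRule.fromPartial({
  id: "allow-get",                         // placeholder rule ID
  allowedMethods: [CorsRule_Method.METHOD_GET, CorsRule_Method.METHOD_HEAD],
  allowedOrigins: ["https://*.example.com"],
  allowedHeaders: ["*"],
  maxAgeSeconds: 3600,
});
*/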
undefined; + return message; + }, +}; + +messageTypeRegistry.set(CorsRule.$type, CorsRule); + +const baseWebsiteSettings: object = { + $type: "yandex.cloud.storage.v1.WebsiteSettings", + index: "", + error: "", +}; + +export const WebsiteSettings = { + $type: "yandex.cloud.storage.v1.WebsiteSettings" as const, + + encode( + message: WebsiteSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.index !== "") { + writer.uint32(10).string(message.index); + } + if (message.error !== "") { + writer.uint32(18).string(message.error); + } + if (message.redirectAllRequests !== undefined) { + WebsiteSettings_Scheme.encode( + message.redirectAllRequests, + writer.uint32(26).fork() + ).ldelim(); + } + for (const v of message.routingRules) { + WebsiteSettings_RoutingRule.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): WebsiteSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseWebsiteSettings } as WebsiteSettings; + message.routingRules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.index = reader.string(); + break; + case 2: + message.error = reader.string(); + break; + case 3: + message.redirectAllRequests = WebsiteSettings_Scheme.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.routingRules.push( + WebsiteSettings_RoutingRule.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WebsiteSettings { + const message = { ...baseWebsiteSettings } as WebsiteSettings; + message.index = + object.index !== undefined && object.index !== null + ? String(object.index) + : ""; + message.error = + object.error !== undefined && object.error !== null + ? String(object.error) + : ""; + message.redirectAllRequests = + object.redirectAllRequests !== undefined && + object.redirectAllRequests !== null + ? WebsiteSettings_Scheme.fromJSON(object.redirectAllRequests) + : undefined; + message.routingRules = (object.routingRules ?? []).map((e: any) => + WebsiteSettings_RoutingRule.fromJSON(e) + ); + return message; + }, + + toJSON(message: WebsiteSettings): unknown { + const obj: any = {}; + message.index !== undefined && (obj.index = message.index); + message.error !== undefined && (obj.error = message.error); + message.redirectAllRequests !== undefined && + (obj.redirectAllRequests = message.redirectAllRequests + ? WebsiteSettings_Scheme.toJSON(message.redirectAllRequests) + : undefined); + if (message.routingRules) { + obj.routingRules = message.routingRules.map((e) => + e ? WebsiteSettings_RoutingRule.toJSON(e) : undefined + ); + } else { + obj.routingRules = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): WebsiteSettings { + const message = { ...baseWebsiteSettings } as WebsiteSettings; + message.index = object.index ?? ""; + message.error = object.error ?? ""; + message.redirectAllRequests = + object.redirectAllRequests !== undefined && + object.redirectAllRequests !== null + ? 
WebsiteSettings_Scheme.fromPartial(object.redirectAllRequests) + : undefined; + message.routingRules = + object.routingRules?.map((e) => + WebsiteSettings_RoutingRule.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(WebsiteSettings.$type, WebsiteSettings); + +const baseWebsiteSettings_Scheme: object = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Scheme", + protocol: 0, + hostname: "", +}; + +export const WebsiteSettings_Scheme = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Scheme" as const, + + encode( + message: WebsiteSettings_Scheme, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.protocol !== 0) { + writer.uint32(8).int32(message.protocol); + } + if (message.hostname !== "") { + writer.uint32(18).string(message.hostname); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WebsiteSettings_Scheme { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseWebsiteSettings_Scheme } as WebsiteSettings_Scheme; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.protocol = reader.int32() as any; + break; + case 2: + message.hostname = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WebsiteSettings_Scheme { + const message = { ...baseWebsiteSettings_Scheme } as WebsiteSettings_Scheme; + message.protocol = + object.protocol !== undefined && object.protocol !== null + ? websiteSettings_ProtocolFromJSON(object.protocol) + : 0; + message.hostname = + object.hostname !== undefined && object.hostname !== null + ? String(object.hostname) + : ""; + return message; + }, + + toJSON(message: WebsiteSettings_Scheme): unknown { + const obj: any = {}; + message.protocol !== undefined && + (obj.protocol = websiteSettings_ProtocolToJSON(message.protocol)); + message.hostname !== undefined && (obj.hostname = message.hostname); + return obj; + }, + + fromPartial, I>>( + object: I + ): WebsiteSettings_Scheme { + const message = { ...baseWebsiteSettings_Scheme } as WebsiteSettings_Scheme; + message.protocol = object.protocol ?? 0; + message.hostname = object.hostname ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(WebsiteSettings_Scheme.$type, WebsiteSettings_Scheme); + +const baseWebsiteSettings_Condition: object = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Condition", + httpErrorCodeReturnedEquals: "", + keyPrefixEquals: "", +}; + +export const WebsiteSettings_Condition = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Condition" as const, + + encode( + message: WebsiteSettings_Condition, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.httpErrorCodeReturnedEquals !== "") { + writer.uint32(10).string(message.httpErrorCodeReturnedEquals); + } + if (message.keyPrefixEquals !== "") { + writer.uint32(18).string(message.keyPrefixEquals); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WebsiteSettings_Condition { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
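/*
A minimal sketch for WebsiteSettings (hostname is a placeholder): redirect every
request to another host over HTTPS. When redirectAllRequests is set, index, error,
and routingRules are left empty, as required by the field comments above.

const website = WebsiteSettings.fromPartial({
  redirectAllRequests: {
    protocol: WebsiteSettings_Protocol.PROTOCOL_HTTPS,
    hostname: "new.example.com",           // placeholder target host
  },
});
*/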
reader.len : reader.pos + length; + const message = { + ...baseWebsiteSettings_Condition, + } as WebsiteSettings_Condition; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.httpErrorCodeReturnedEquals = reader.string(); + break; + case 2: + message.keyPrefixEquals = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WebsiteSettings_Condition { + const message = { + ...baseWebsiteSettings_Condition, + } as WebsiteSettings_Condition; + message.httpErrorCodeReturnedEquals = + object.httpErrorCodeReturnedEquals !== undefined && + object.httpErrorCodeReturnedEquals !== null + ? String(object.httpErrorCodeReturnedEquals) + : ""; + message.keyPrefixEquals = + object.keyPrefixEquals !== undefined && object.keyPrefixEquals !== null + ? String(object.keyPrefixEquals) + : ""; + return message; + }, + + toJSON(message: WebsiteSettings_Condition): unknown { + const obj: any = {}; + message.httpErrorCodeReturnedEquals !== undefined && + (obj.httpErrorCodeReturnedEquals = message.httpErrorCodeReturnedEquals); + message.keyPrefixEquals !== undefined && + (obj.keyPrefixEquals = message.keyPrefixEquals); + return obj; + }, + + fromPartial, I>>( + object: I + ): WebsiteSettings_Condition { + const message = { + ...baseWebsiteSettings_Condition, + } as WebsiteSettings_Condition; + message.httpErrorCodeReturnedEquals = + object.httpErrorCodeReturnedEquals ?? ""; + message.keyPrefixEquals = object.keyPrefixEquals ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + WebsiteSettings_Condition.$type, + WebsiteSettings_Condition +); + +const baseWebsiteSettings_Redirect: object = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Redirect", + hostname: "", + httpRedirectCode: "", + protocol: 0, + replaceKeyPrefixWith: "", + replaceKeyWith: "", +}; + +export const WebsiteSettings_Redirect = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.Redirect" as const, + + encode( + message: WebsiteSettings_Redirect, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hostname !== "") { + writer.uint32(10).string(message.hostname); + } + if (message.httpRedirectCode !== "") { + writer.uint32(18).string(message.httpRedirectCode); + } + if (message.protocol !== 0) { + writer.uint32(24).int32(message.protocol); + } + if (message.replaceKeyPrefixWith !== "") { + writer.uint32(34).string(message.replaceKeyPrefixWith); + } + if (message.replaceKeyWith !== "") { + writer.uint32(42).string(message.replaceKeyWith); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WebsiteSettings_Redirect { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseWebsiteSettings_Redirect, + } as WebsiteSettings_Redirect; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hostname = reader.string(); + break; + case 2: + message.httpRedirectCode = reader.string(); + break; + case 3: + message.protocol = reader.int32() as any; + break; + case 4: + message.replaceKeyPrefixWith = reader.string(); + break; + case 5: + message.replaceKeyWith = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WebsiteSettings_Redirect { + const message = { + ...baseWebsiteSettings_Redirect, + } as WebsiteSettings_Redirect; + message.hostname = + object.hostname !== undefined && object.hostname !== null + ? String(object.hostname) + : ""; + message.httpRedirectCode = + object.httpRedirectCode !== undefined && object.httpRedirectCode !== null + ? String(object.httpRedirectCode) + : ""; + message.protocol = + object.protocol !== undefined && object.protocol !== null + ? websiteSettings_ProtocolFromJSON(object.protocol) + : 0; + message.replaceKeyPrefixWith = + object.replaceKeyPrefixWith !== undefined && + object.replaceKeyPrefixWith !== null + ? String(object.replaceKeyPrefixWith) + : ""; + message.replaceKeyWith = + object.replaceKeyWith !== undefined && object.replaceKeyWith !== null + ? String(object.replaceKeyWith) + : ""; + return message; + }, + + toJSON(message: WebsiteSettings_Redirect): unknown { + const obj: any = {}; + message.hostname !== undefined && (obj.hostname = message.hostname); + message.httpRedirectCode !== undefined && + (obj.httpRedirectCode = message.httpRedirectCode); + message.protocol !== undefined && + (obj.protocol = websiteSettings_ProtocolToJSON(message.protocol)); + message.replaceKeyPrefixWith !== undefined && + (obj.replaceKeyPrefixWith = message.replaceKeyPrefixWith); + message.replaceKeyWith !== undefined && + (obj.replaceKeyWith = message.replaceKeyWith); + return obj; + }, + + fromPartial, I>>( + object: I + ): WebsiteSettings_Redirect { + const message = { + ...baseWebsiteSettings_Redirect, + } as WebsiteSettings_Redirect; + message.hostname = object.hostname ?? ""; + message.httpRedirectCode = object.httpRedirectCode ?? ""; + message.protocol = object.protocol ?? 0; + message.replaceKeyPrefixWith = object.replaceKeyPrefixWith ?? ""; + message.replaceKeyWith = object.replaceKeyWith ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + WebsiteSettings_Redirect.$type, + WebsiteSettings_Redirect +); + +const baseWebsiteSettings_RoutingRule: object = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.RoutingRule", +}; + +export const WebsiteSettings_RoutingRule = { + $type: "yandex.cloud.storage.v1.WebsiteSettings.RoutingRule" as const, + + encode( + message: WebsiteSettings_RoutingRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.condition !== undefined) { + WebsiteSettings_Condition.encode( + message.condition, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.redirect !== undefined) { + WebsiteSettings_Redirect.encode( + message.redirect, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): WebsiteSettings_RoutingRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseWebsiteSettings_RoutingRule, + } as WebsiteSettings_RoutingRule; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.condition = WebsiteSettings_Condition.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.redirect = WebsiteSettings_Redirect.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): WebsiteSettings_RoutingRule { + const message = { + ...baseWebsiteSettings_RoutingRule, + } as WebsiteSettings_RoutingRule; + message.condition = + object.condition !== undefined && object.condition !== null + ? WebsiteSettings_Condition.fromJSON(object.condition) + : undefined; + message.redirect = + object.redirect !== undefined && object.redirect !== null + ? WebsiteSettings_Redirect.fromJSON(object.redirect) + : undefined; + return message; + }, + + toJSON(message: WebsiteSettings_RoutingRule): unknown { + const obj: any = {}; + message.condition !== undefined && + (obj.condition = message.condition + ? WebsiteSettings_Condition.toJSON(message.condition) + : undefined); + message.redirect !== undefined && + (obj.redirect = message.redirect + ? WebsiteSettings_Redirect.toJSON(message.redirect) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): WebsiteSettings_RoutingRule { + const message = { + ...baseWebsiteSettings_RoutingRule, + } as WebsiteSettings_RoutingRule; + message.condition = + object.condition !== undefined && object.condition !== null + ? WebsiteSettings_Condition.fromPartial(object.condition) + : undefined; + message.redirect = + object.redirect !== undefined && object.redirect !== null + ? WebsiteSettings_Redirect.fromPartial(object.redirect) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + WebsiteSettings_RoutingRule.$type, + WebsiteSettings_RoutingRule +); + +const baseLifecycleRule: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule", + enabled: false, +}; + +export const LifecycleRule = { + $type: "yandex.cloud.storage.v1.LifecycleRule" as const, + + encode( + message: LifecycleRule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== undefined) { + StringValue.encode( + { $type: "google.protobuf.StringValue", value: message.id! 
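/*
A minimal sketch for a routing rule (key prefixes are placeholders): requests whose
keys start with "docs/" are redirected to the same keys under "documents/" over HTTPS.

const rule = WebsiteSettings_RoutingRule.fromPartial({
  condition: { keyPrefixEquals: "docs/" },
  redirect: {
    replaceKeyPrefixWith: "documents/",
    httpRedirectCode: "301",
    protocol: WebsiteSettings_Protocol.PROTOCOL_HTTPS,
  },
});
*/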
}, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.enabled === true) { + writer.uint32(16).bool(message.enabled); + } + if (message.filter !== undefined) { + LifecycleRule_RuleFilter.encode( + message.filter, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.expiration !== undefined) { + LifecycleRule_Expiration.encode( + message.expiration, + writer.uint32(34).fork() + ).ldelim(); + } + for (const v of message.transitions) { + LifecycleRule_Transition.encode(v!, writer.uint32(42).fork()).ldelim(); + } + if (message.abortIncompleteMultipartUpload !== undefined) { + LifecycleRule_AfterDays.encode( + message.abortIncompleteMultipartUpload, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.noncurrentExpiration !== undefined) { + LifecycleRule_NoncurrentExpiration.encode( + message.noncurrentExpiration, + writer.uint32(58).fork() + ).ldelim(); + } + for (const v of message.noncurrentTransitions) { + LifecycleRule_NoncurrentTransition.encode( + v!, + writer.uint32(66).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LifecycleRule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLifecycleRule } as LifecycleRule; + message.transitions = []; + message.noncurrentTransitions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = StringValue.decode(reader, reader.uint32()).value; + break; + case 2: + message.enabled = reader.bool(); + break; + case 3: + message.filter = LifecycleRule_RuleFilter.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.expiration = LifecycleRule_Expiration.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.transitions.push( + LifecycleRule_Transition.decode(reader, reader.uint32()) + ); + break; + case 6: + message.abortIncompleteMultipartUpload = + LifecycleRule_AfterDays.decode(reader, reader.uint32()); + break; + case 7: + message.noncurrentExpiration = + LifecycleRule_NoncurrentExpiration.decode(reader, reader.uint32()); + break; + case 8: + message.noncurrentTransitions.push( + LifecycleRule_NoncurrentTransition.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule { + const message = { ...baseLifecycleRule } as LifecycleRule; + message.id = + object.id !== undefined && object.id !== null + ? String(object.id) + : undefined; + message.enabled = + object.enabled !== undefined && object.enabled !== null + ? Boolean(object.enabled) + : false; + message.filter = + object.filter !== undefined && object.filter !== null + ? LifecycleRule_RuleFilter.fromJSON(object.filter) + : undefined; + message.expiration = + object.expiration !== undefined && object.expiration !== null + ? LifecycleRule_Expiration.fromJSON(object.expiration) + : undefined; + message.transitions = (object.transitions ?? []).map((e: any) => + LifecycleRule_Transition.fromJSON(e) + ); + message.abortIncompleteMultipartUpload = + object.abortIncompleteMultipartUpload !== undefined && + object.abortIncompleteMultipartUpload !== null + ? LifecycleRule_AfterDays.fromJSON( + object.abortIncompleteMultipartUpload + ) + : undefined; + message.noncurrentExpiration = + object.noncurrentExpiration !== undefined && + object.noncurrentExpiration !== null + ? 
LifecycleRule_NoncurrentExpiration.fromJSON( + object.noncurrentExpiration + ) + : undefined; + message.noncurrentTransitions = (object.noncurrentTransitions ?? []).map( + (e: any) => LifecycleRule_NoncurrentTransition.fromJSON(e) + ); + return message; + }, + + toJSON(message: LifecycleRule): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.enabled !== undefined && (obj.enabled = message.enabled); + message.filter !== undefined && + (obj.filter = message.filter + ? LifecycleRule_RuleFilter.toJSON(message.filter) + : undefined); + message.expiration !== undefined && + (obj.expiration = message.expiration + ? LifecycleRule_Expiration.toJSON(message.expiration) + : undefined); + if (message.transitions) { + obj.transitions = message.transitions.map((e) => + e ? LifecycleRule_Transition.toJSON(e) : undefined + ); + } else { + obj.transitions = []; + } + message.abortIncompleteMultipartUpload !== undefined && + (obj.abortIncompleteMultipartUpload = + message.abortIncompleteMultipartUpload + ? LifecycleRule_AfterDays.toJSON( + message.abortIncompleteMultipartUpload + ) + : undefined); + message.noncurrentExpiration !== undefined && + (obj.noncurrentExpiration = message.noncurrentExpiration + ? LifecycleRule_NoncurrentExpiration.toJSON( + message.noncurrentExpiration + ) + : undefined); + if (message.noncurrentTransitions) { + obj.noncurrentTransitions = message.noncurrentTransitions.map((e) => + e ? LifecycleRule_NoncurrentTransition.toJSON(e) : undefined + ); + } else { + obj.noncurrentTransitions = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LifecycleRule { + const message = { ...baseLifecycleRule } as LifecycleRule; + message.id = object.id ?? undefined; + message.enabled = object.enabled ?? false; + message.filter = + object.filter !== undefined && object.filter !== null + ? LifecycleRule_RuleFilter.fromPartial(object.filter) + : undefined; + message.expiration = + object.expiration !== undefined && object.expiration !== null + ? LifecycleRule_Expiration.fromPartial(object.expiration) + : undefined; + message.transitions = + object.transitions?.map((e) => LifecycleRule_Transition.fromPartial(e)) || + []; + message.abortIncompleteMultipartUpload = + object.abortIncompleteMultipartUpload !== undefined && + object.abortIncompleteMultipartUpload !== null + ? LifecycleRule_AfterDays.fromPartial( + object.abortIncompleteMultipartUpload + ) + : undefined; + message.noncurrentExpiration = + object.noncurrentExpiration !== undefined && + object.noncurrentExpiration !== null + ? 
LifecycleRule_NoncurrentExpiration.fromPartial( + object.noncurrentExpiration + ) + : undefined; + message.noncurrentTransitions = + object.noncurrentTransitions?.map((e) => + LifecycleRule_NoncurrentTransition.fromPartial(e) + ) || []; + return message; + }, +}; + +messageTypeRegistry.set(LifecycleRule.$type, LifecycleRule); + +const baseLifecycleRule_AfterDays: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule.AfterDays", +}; + +export const LifecycleRule_AfterDays = { + $type: "yandex.cloud.storage.v1.LifecycleRule.AfterDays" as const, + + encode( + message: LifecycleRule_AfterDays, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.daysAfterExpiration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.daysAfterExpiration!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LifecycleRule_AfterDays { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseLifecycleRule_AfterDays, + } as LifecycleRule_AfterDays; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.daysAfterExpiration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule_AfterDays { + const message = { + ...baseLifecycleRule_AfterDays, + } as LifecycleRule_AfterDays; + message.daysAfterExpiration = + object.daysAfterExpiration !== undefined && + object.daysAfterExpiration !== null + ? Number(object.daysAfterExpiration) + : undefined; + return message; + }, + + toJSON(message: LifecycleRule_AfterDays): unknown { + const obj: any = {}; + message.daysAfterExpiration !== undefined && + (obj.daysAfterExpiration = message.daysAfterExpiration); + return obj; + }, + + fromPartial, I>>( + object: I + ): LifecycleRule_AfterDays { + const message = { + ...baseLifecycleRule_AfterDays, + } as LifecycleRule_AfterDays; + message.daysAfterExpiration = object.daysAfterExpiration ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(LifecycleRule_AfterDays.$type, LifecycleRule_AfterDays); + +const baseLifecycleRule_NoncurrentExpiration: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule.NoncurrentExpiration", +}; + +export const LifecycleRule_NoncurrentExpiration = { + $type: "yandex.cloud.storage.v1.LifecycleRule.NoncurrentExpiration" as const, + + encode( + message: LifecycleRule_NoncurrentExpiration, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.noncurrentDays !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.noncurrentDays! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LifecycleRule_NoncurrentExpiration { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseLifecycleRule_NoncurrentExpiration, + } as LifecycleRule_NoncurrentExpiration; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.noncurrentDays = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule_NoncurrentExpiration { + const message = { + ...baseLifecycleRule_NoncurrentExpiration, + } as LifecycleRule_NoncurrentExpiration; + message.noncurrentDays = + object.noncurrentDays !== undefined && object.noncurrentDays !== null + ? Number(object.noncurrentDays) + : undefined; + return message; + }, + + toJSON(message: LifecycleRule_NoncurrentExpiration): unknown { + const obj: any = {}; + message.noncurrentDays !== undefined && + (obj.noncurrentDays = message.noncurrentDays); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): LifecycleRule_NoncurrentExpiration { + const message = { + ...baseLifecycleRule_NoncurrentExpiration, + } as LifecycleRule_NoncurrentExpiration; + message.noncurrentDays = object.noncurrentDays ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + LifecycleRule_NoncurrentExpiration.$type, + LifecycleRule_NoncurrentExpiration +); + +const baseLifecycleRule_NoncurrentTransition: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule.NoncurrentTransition", + storageClass: "", +}; + +export const LifecycleRule_NoncurrentTransition = { + $type: "yandex.cloud.storage.v1.LifecycleRule.NoncurrentTransition" as const, + + encode( + message: LifecycleRule_NoncurrentTransition, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.noncurrentDays !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.noncurrentDays! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.storageClass !== "") { + writer.uint32(18).string(message.storageClass); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LifecycleRule_NoncurrentTransition { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseLifecycleRule_NoncurrentTransition, + } as LifecycleRule_NoncurrentTransition; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.noncurrentDays = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.storageClass = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule_NoncurrentTransition { + const message = { + ...baseLifecycleRule_NoncurrentTransition, + } as LifecycleRule_NoncurrentTransition; + message.noncurrentDays = + object.noncurrentDays !== undefined && object.noncurrentDays !== null + ? Number(object.noncurrentDays) + : undefined; + message.storageClass = + object.storageClass !== undefined && object.storageClass !== null + ? 
String(object.storageClass) + : ""; + return message; + }, + + toJSON(message: LifecycleRule_NoncurrentTransition): unknown { + const obj: any = {}; + message.noncurrentDays !== undefined && + (obj.noncurrentDays = message.noncurrentDays); + message.storageClass !== undefined && + (obj.storageClass = message.storageClass); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): LifecycleRule_NoncurrentTransition { + const message = { + ...baseLifecycleRule_NoncurrentTransition, + } as LifecycleRule_NoncurrentTransition; + message.noncurrentDays = object.noncurrentDays ?? undefined; + message.storageClass = object.storageClass ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + LifecycleRule_NoncurrentTransition.$type, + LifecycleRule_NoncurrentTransition +); + +const baseLifecycleRule_Transition: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule.Transition", + storageClass: "", +}; + +export const LifecycleRule_Transition = { + $type: "yandex.cloud.storage.v1.LifecycleRule.Transition" as const, + + encode( + message: LifecycleRule_Transition, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.date !== undefined) { + Timestamp.encode( + toTimestamp(message.date), + writer.uint32(10).fork() + ).ldelim(); + } + if (message.days !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.days! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.storageClass !== "") { + writer.uint32(34).string(message.storageClass); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LifecycleRule_Transition { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseLifecycleRule_Transition, + } as LifecycleRule_Transition; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.date = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + message.days = Int64Value.decode(reader, reader.uint32()).value; + break; + case 4: + message.storageClass = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule_Transition { + const message = { + ...baseLifecycleRule_Transition, + } as LifecycleRule_Transition; + message.date = + object.date !== undefined && object.date !== null + ? fromJsonTimestamp(object.date) + : undefined; + message.days = + object.days !== undefined && object.days !== null + ? Number(object.days) + : undefined; + message.storageClass = + object.storageClass !== undefined && object.storageClass !== null + ? String(object.storageClass) + : ""; + return message; + }, + + toJSON(message: LifecycleRule_Transition): unknown { + const obj: any = {}; + message.date !== undefined && (obj.date = message.date.toISOString()); + message.days !== undefined && (obj.days = message.days); + message.storageClass !== undefined && + (obj.storageClass = message.storageClass); + return obj; + }, + + fromPartial, I>>( + object: I + ): LifecycleRule_Transition { + const message = { + ...baseLifecycleRule_Transition, + } as LifecycleRule_Transition; + message.date = object.date ?? undefined; + message.days = object.days ?? undefined; + message.storageClass = object.storageClass ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + LifecycleRule_Transition.$type, + LifecycleRule_Transition +); + +const baseLifecycleRule_Expiration: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule.Expiration", +}; + +export const LifecycleRule_Expiration = { + $type: "yandex.cloud.storage.v1.LifecycleRule.Expiration" as const, + + encode( + message: LifecycleRule_Expiration, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.date !== undefined) { + Timestamp.encode( + toTimestamp(message.date), + writer.uint32(10).fork() + ).ldelim(); + } + if (message.days !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.days! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.expiredObjectDeleteMarker !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.expiredObjectDeleteMarker!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LifecycleRule_Expiration { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseLifecycleRule_Expiration, + } as LifecycleRule_Expiration; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.date = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + message.days = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.expiredObjectDeleteMarker = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule_Expiration { + const message = { + ...baseLifecycleRule_Expiration, + } as LifecycleRule_Expiration; + message.date = + object.date !== undefined && object.date !== null + ? fromJsonTimestamp(object.date) + : undefined; + message.days = + object.days !== undefined && object.days !== null + ? Number(object.days) + : undefined; + message.expiredObjectDeleteMarker = + object.expiredObjectDeleteMarker !== undefined && + object.expiredObjectDeleteMarker !== null + ? Boolean(object.expiredObjectDeleteMarker) + : undefined; + return message; + }, + + toJSON(message: LifecycleRule_Expiration): unknown { + const obj: any = {}; + message.date !== undefined && (obj.date = message.date.toISOString()); + message.days !== undefined && (obj.days = message.days); + message.expiredObjectDeleteMarker !== undefined && + (obj.expiredObjectDeleteMarker = message.expiredObjectDeleteMarker); + return obj; + }, + + fromPartial, I>>( + object: I + ): LifecycleRule_Expiration { + const message = { + ...baseLifecycleRule_Expiration, + } as LifecycleRule_Expiration; + message.date = object.date ?? undefined; + message.days = object.days ?? undefined; + message.expiredObjectDeleteMarker = + object.expiredObjectDeleteMarker ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + LifecycleRule_Expiration.$type, + LifecycleRule_Expiration +); + +const baseLifecycleRule_RuleFilter: object = { + $type: "yandex.cloud.storage.v1.LifecycleRule.RuleFilter", + prefix: "", +}; + +export const LifecycleRule_RuleFilter = { + $type: "yandex.cloud.storage.v1.LifecycleRule.RuleFilter" as const, + + encode( + message: LifecycleRule_RuleFilter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.prefix !== "") { + writer.uint32(10).string(message.prefix); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LifecycleRule_RuleFilter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseLifecycleRule_RuleFilter, + } as LifecycleRule_RuleFilter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.prefix = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LifecycleRule_RuleFilter { + const message = { + ...baseLifecycleRule_RuleFilter, + } as LifecycleRule_RuleFilter; + message.prefix = + object.prefix !== undefined && object.prefix !== null + ? String(object.prefix) + : ""; + return message; + }, + + toJSON(message: LifecycleRule_RuleFilter): unknown { + const obj: any = {}; + message.prefix !== undefined && (obj.prefix = message.prefix); + return obj; + }, + + fromPartial, I>>( + object: I + ): LifecycleRule_RuleFilter { + const message = { + ...baseLifecycleRule_RuleFilter, + } as LifecycleRule_RuleFilter; + message.prefix = object.prefix ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + LifecycleRule_RuleFilter.$type, + LifecycleRule_RuleFilter +); + +const baseCounters: object = { + $type: "yandex.cloud.storage.v1.Counters", + simpleObjectSize: 0, + simpleObjectCount: 0, + objectsPartsSize: 0, + objectsPartsCount: 0, + multipartObjectsSize: 0, + multipartObjectsCount: 0, + activeMultipartCount: 0, +}; + +export const Counters = { + $type: "yandex.cloud.storage.v1.Counters" as const, + + encode( + message: Counters, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.simpleObjectSize !== 0) { + writer.uint32(8).int64(message.simpleObjectSize); + } + if (message.simpleObjectCount !== 0) { + writer.uint32(16).int64(message.simpleObjectCount); + } + if (message.objectsPartsSize !== 0) { + writer.uint32(24).int64(message.objectsPartsSize); + } + if (message.objectsPartsCount !== 0) { + writer.uint32(32).int64(message.objectsPartsCount); + } + if (message.multipartObjectsSize !== 0) { + writer.uint32(40).int64(message.multipartObjectsSize); + } + if (message.multipartObjectsCount !== 0) { + writer.uint32(48).int64(message.multipartObjectsCount); + } + if (message.activeMultipartCount !== 0) { + writer.uint32(56).int64(message.activeMultipartCount); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Counters { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseCounters } as Counters; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.simpleObjectSize = longToNumber(reader.int64() as Long); + break; + case 2: + message.simpleObjectCount = longToNumber(reader.int64() as Long); + break; + case 3: + message.objectsPartsSize = longToNumber(reader.int64() as Long); + break; + case 4: + message.objectsPartsCount = longToNumber(reader.int64() as Long); + break; + case 5: + message.multipartObjectsSize = longToNumber(reader.int64() as Long); + break; + case 6: + message.multipartObjectsCount = longToNumber(reader.int64() as Long); + break; + case 7: + message.activeMultipartCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Counters { + const message = { ...baseCounters } as Counters; + message.simpleObjectSize = + object.simpleObjectSize !== undefined && object.simpleObjectSize !== null + ? Number(object.simpleObjectSize) + : 0; + message.simpleObjectCount = + object.simpleObjectCount !== undefined && + object.simpleObjectCount !== null + ? Number(object.simpleObjectCount) + : 0; + message.objectsPartsSize = + object.objectsPartsSize !== undefined && object.objectsPartsSize !== null + ? Number(object.objectsPartsSize) + : 0; + message.objectsPartsCount = + object.objectsPartsCount !== undefined && + object.objectsPartsCount !== null + ? Number(object.objectsPartsCount) + : 0; + message.multipartObjectsSize = + object.multipartObjectsSize !== undefined && + object.multipartObjectsSize !== null + ? Number(object.multipartObjectsSize) + : 0; + message.multipartObjectsCount = + object.multipartObjectsCount !== undefined && + object.multipartObjectsCount !== null + ? Number(object.multipartObjectsCount) + : 0; + message.activeMultipartCount = + object.activeMultipartCount !== undefined && + object.activeMultipartCount !== null + ? Number(object.activeMultipartCount) + : 0; + return message; + }, + + toJSON(message: Counters): unknown { + const obj: any = {}; + message.simpleObjectSize !== undefined && + (obj.simpleObjectSize = Math.round(message.simpleObjectSize)); + message.simpleObjectCount !== undefined && + (obj.simpleObjectCount = Math.round(message.simpleObjectCount)); + message.objectsPartsSize !== undefined && + (obj.objectsPartsSize = Math.round(message.objectsPartsSize)); + message.objectsPartsCount !== undefined && + (obj.objectsPartsCount = Math.round(message.objectsPartsCount)); + message.multipartObjectsSize !== undefined && + (obj.multipartObjectsSize = Math.round(message.multipartObjectsSize)); + message.multipartObjectsCount !== undefined && + (obj.multipartObjectsCount = Math.round(message.multipartObjectsCount)); + message.activeMultipartCount !== undefined && + (obj.activeMultipartCount = Math.round(message.activeMultipartCount)); + return obj; + }, + + fromPartial, I>>(object: I): Counters { + const message = { ...baseCounters } as Counters; + message.simpleObjectSize = object.simpleObjectSize ?? 0; + message.simpleObjectCount = object.simpleObjectCount ?? 0; + message.objectsPartsSize = object.objectsPartsSize ?? 0; + message.objectsPartsCount = object.objectsPartsCount ?? 0; + message.multipartObjectsSize = object.multipartObjectsSize ?? 0; + message.multipartObjectsCount = object.multipartObjectsCount ?? 0; + message.activeMultipartCount = object.activeMultipartCount ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Counters.$type, Counters); + +const baseOptionalSizeByClass: object = { + $type: "yandex.cloud.storage.v1.OptionalSizeByClass", + storageClass: "", +}; + +export const OptionalSizeByClass = { + $type: "yandex.cloud.storage.v1.OptionalSizeByClass" as const, + + encode( + message: OptionalSizeByClass, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storageClass !== "") { + writer.uint32(10).string(message.storageClass); + } + if (message.classSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.classSize! }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OptionalSizeByClass { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseOptionalSizeByClass } as OptionalSizeByClass; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storageClass = reader.string(); + break; + case 2: + message.classSize = Int64Value.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): OptionalSizeByClass { + const message = { ...baseOptionalSizeByClass } as OptionalSizeByClass; + message.storageClass = + object.storageClass !== undefined && object.storageClass !== null + ? String(object.storageClass) + : ""; + message.classSize = + object.classSize !== undefined && object.classSize !== null + ? Number(object.classSize) + : undefined; + return message; + }, + + toJSON(message: OptionalSizeByClass): unknown { + const obj: any = {}; + message.storageClass !== undefined && + (obj.storageClass = message.storageClass); + message.classSize !== undefined && (obj.classSize = message.classSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): OptionalSizeByClass { + const message = { ...baseOptionalSizeByClass } as OptionalSizeByClass; + message.storageClass = object.storageClass ?? ""; + message.classSize = object.classSize ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(OptionalSizeByClass.$type, OptionalSizeByClass); + +const baseSizeByClass: object = { + $type: "yandex.cloud.storage.v1.SizeByClass", + storageClass: "", + classSize: 0, +}; + +export const SizeByClass = { + $type: "yandex.cloud.storage.v1.SizeByClass" as const, + + encode( + message: SizeByClass, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storageClass !== "") { + writer.uint32(10).string(message.storageClass); + } + if (message.classSize !== 0) { + writer.uint32(16).int64(message.classSize); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SizeByClass { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSizeByClass } as SizeByClass; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storageClass = reader.string(); + break; + case 2: + message.classSize = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SizeByClass { + const message = { ...baseSizeByClass } as SizeByClass; + message.storageClass = + object.storageClass !== undefined && object.storageClass !== null + ? String(object.storageClass) + : ""; + message.classSize = + object.classSize !== undefined && object.classSize !== null + ? Number(object.classSize) + : 0; + return message; + }, + + toJSON(message: SizeByClass): unknown { + const obj: any = {}; + message.storageClass !== undefined && + (obj.storageClass = message.storageClass); + message.classSize !== undefined && + (obj.classSize = Math.round(message.classSize)); + return obj; + }, + + fromPartial, I>>( + object: I + ): SizeByClass { + const message = { ...baseSizeByClass } as SizeByClass; + message.storageClass = object.storageClass ?? ""; + message.classSize = object.classSize ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(SizeByClass.$type, SizeByClass); + +const baseCountersByClass: object = { + $type: "yandex.cloud.storage.v1.CountersByClass", + storageClass: "", +}; + +export const CountersByClass = { + $type: "yandex.cloud.storage.v1.CountersByClass" as const, + + encode( + message: CountersByClass, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storageClass !== "") { + writer.uint32(10).string(message.storageClass); + } + if (message.counters !== undefined) { + Counters.encode(message.counters, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CountersByClass { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCountersByClass } as CountersByClass; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storageClass = reader.string(); + break; + case 2: + message.counters = Counters.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CountersByClass { + const message = { ...baseCountersByClass } as CountersByClass; + message.storageClass = + object.storageClass !== undefined && object.storageClass !== null + ? String(object.storageClass) + : ""; + message.counters = + object.counters !== undefined && object.counters !== null + ? Counters.fromJSON(object.counters) + : undefined; + return message; + }, + + toJSON(message: CountersByClass): unknown { + const obj: any = {}; + message.storageClass !== undefined && + (obj.storageClass = message.storageClass); + message.counters !== undefined && + (obj.counters = message.counters + ? Counters.toJSON(message.counters) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CountersByClass { + const message = { ...baseCountersByClass } as CountersByClass; + message.storageClass = object.storageClass ?? ""; + message.counters = + object.counters !== undefined && object.counters !== null + ? 
Counters.fromPartial(object.counters) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CountersByClass.$type, CountersByClass); + +const baseBucketStats: object = { + $type: "yandex.cloud.storage.v1.BucketStats", + name: "", + usedSize: 0, +}; + +export const BucketStats = { + $type: "yandex.cloud.storage.v1.BucketStats" as const, + + encode( + message: BucketStats, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.maxSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxSize! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.usedSize !== 0) { + writer.uint32(24).int64(message.usedSize); + } + for (const v of message.storageClassMaxSizes) { + OptionalSizeByClass.encode(v!, writer.uint32(34).fork()).ldelim(); + } + for (const v of message.storageClassUsedSizes) { + SizeByClass.encode(v!, writer.uint32(42).fork()).ldelim(); + } + for (const v of message.storageClassCounters) { + CountersByClass.encode(v!, writer.uint32(50).fork()).ldelim(); + } + if (message.defaultStorageClass !== undefined) { + StringValue.encode( + { + $type: "google.protobuf.StringValue", + value: message.defaultStorageClass!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.anonymousAccessFlags !== undefined) { + AnonymousAccessFlags.encode( + message.anonymousAccessFlags, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(74).fork() + ).ldelim(); + } + if (message.updatedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.updatedAt), + writer.uint32(82).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BucketStats { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBucketStats } as BucketStats; + message.storageClassMaxSizes = []; + message.storageClassUsedSizes = []; + message.storageClassCounters = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.maxSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.usedSize = longToNumber(reader.int64() as Long); + break; + case 4: + message.storageClassMaxSizes.push( + OptionalSizeByClass.decode(reader, reader.uint32()) + ); + break; + case 5: + message.storageClassUsedSizes.push( + SizeByClass.decode(reader, reader.uint32()) + ); + break; + case 6: + message.storageClassCounters.push( + CountersByClass.decode(reader, reader.uint32()) + ); + break; + case 7: + message.defaultStorageClass = StringValue.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.anonymousAccessFlags = AnonymousAccessFlags.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 10: + message.updatedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BucketStats { + const message = { ...baseBucketStats } as BucketStats; + message.name = + object.name !== undefined && object.name !== null + ? 
String(object.name) + : ""; + message.maxSize = + object.maxSize !== undefined && object.maxSize !== null + ? Number(object.maxSize) + : undefined; + message.usedSize = + object.usedSize !== undefined && object.usedSize !== null + ? Number(object.usedSize) + : 0; + message.storageClassMaxSizes = (object.storageClassMaxSizes ?? []).map( + (e: any) => OptionalSizeByClass.fromJSON(e) + ); + message.storageClassUsedSizes = (object.storageClassUsedSizes ?? []).map( + (e: any) => SizeByClass.fromJSON(e) + ); + message.storageClassCounters = (object.storageClassCounters ?? []).map( + (e: any) => CountersByClass.fromJSON(e) + ); + message.defaultStorageClass = + object.defaultStorageClass !== undefined && + object.defaultStorageClass !== null + ? String(object.defaultStorageClass) + : undefined; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? AnonymousAccessFlags.fromJSON(object.anonymousAccessFlags) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.updatedAt = + object.updatedAt !== undefined && object.updatedAt !== null + ? fromJsonTimestamp(object.updatedAt) + : undefined; + return message; + }, + + toJSON(message: BucketStats): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.maxSize !== undefined && (obj.maxSize = message.maxSize); + message.usedSize !== undefined && + (obj.usedSize = Math.round(message.usedSize)); + if (message.storageClassMaxSizes) { + obj.storageClassMaxSizes = message.storageClassMaxSizes.map((e) => + e ? OptionalSizeByClass.toJSON(e) : undefined + ); + } else { + obj.storageClassMaxSizes = []; + } + if (message.storageClassUsedSizes) { + obj.storageClassUsedSizes = message.storageClassUsedSizes.map((e) => + e ? SizeByClass.toJSON(e) : undefined + ); + } else { + obj.storageClassUsedSizes = []; + } + if (message.storageClassCounters) { + obj.storageClassCounters = message.storageClassCounters.map((e) => + e ? CountersByClass.toJSON(e) : undefined + ); + } else { + obj.storageClassCounters = []; + } + message.defaultStorageClass !== undefined && + (obj.defaultStorageClass = message.defaultStorageClass); + message.anonymousAccessFlags !== undefined && + (obj.anonymousAccessFlags = message.anonymousAccessFlags + ? AnonymousAccessFlags.toJSON(message.anonymousAccessFlags) + : undefined); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.updatedAt !== undefined && + (obj.updatedAt = message.updatedAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): BucketStats { + const message = { ...baseBucketStats } as BucketStats; + message.name = object.name ?? ""; + message.maxSize = object.maxSize ?? undefined; + message.usedSize = object.usedSize ?? 0; + message.storageClassMaxSizes = + object.storageClassMaxSizes?.map((e) => + OptionalSizeByClass.fromPartial(e) + ) || []; + message.storageClassUsedSizes = + object.storageClassUsedSizes?.map((e) => SizeByClass.fromPartial(e)) || + []; + message.storageClassCounters = + object.storageClassCounters?.map((e) => CountersByClass.fromPartial(e)) || + []; + message.defaultStorageClass = object.defaultStorageClass ?? undefined; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? 
AnonymousAccessFlags.fromPartial(object.anonymousAccessFlags) + : undefined; + message.createdAt = object.createdAt ?? undefined; + message.updatedAt = object.updatedAt ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(BucketStats.$type, BucketStats); + +const baseHTTPSConfig: object = { + $type: "yandex.cloud.storage.v1.HTTPSConfig", + name: "", + sourceType: 0, + dnsNames: "", + certificateId: "", +}; + +export const HTTPSConfig = { + $type: "yandex.cloud.storage.v1.HTTPSConfig" as const, + + encode( + message: HTTPSConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.sourceType !== 0) { + writer.uint32(16).int32(message.sourceType); + } + if (message.issuer !== undefined) { + StringValue.encode( + { $type: "google.protobuf.StringValue", value: message.issuer! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.subject !== undefined) { + StringValue.encode( + { $type: "google.protobuf.StringValue", value: message.subject! }, + writer.uint32(34).fork() + ).ldelim(); + } + for (const v of message.dnsNames) { + writer.uint32(42).string(v!); + } + if (message.notBefore !== undefined) { + Timestamp.encode( + toTimestamp(message.notBefore), + writer.uint32(50).fork() + ).ldelim(); + } + if (message.notAfter !== undefined) { + Timestamp.encode( + toTimestamp(message.notAfter), + writer.uint32(58).fork() + ).ldelim(); + } + if (message.certificateId !== "") { + writer.uint32(66).string(message.certificateId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): HTTPSConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseHTTPSConfig } as HTTPSConfig; + message.dnsNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.sourceType = reader.int32() as any; + break; + case 3: + message.issuer = StringValue.decode(reader, reader.uint32()).value; + break; + case 4: + message.subject = StringValue.decode(reader, reader.uint32()).value; + break; + case 5: + message.dnsNames.push(reader.string()); + break; + case 6: + message.notBefore = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 7: + message.notAfter = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 8: + message.certificateId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): HTTPSConfig { + const message = { ...baseHTTPSConfig } as HTTPSConfig; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.sourceType = + object.sourceType !== undefined && object.sourceType !== null + ? hTTPSConfig_SourceTypeFromJSON(object.sourceType) + : 0; + message.issuer = + object.issuer !== undefined && object.issuer !== null + ? String(object.issuer) + : undefined; + message.subject = + object.subject !== undefined && object.subject !== null + ? String(object.subject) + : undefined; + message.dnsNames = (object.dnsNames ?? []).map((e: any) => String(e)); + message.notBefore = + object.notBefore !== undefined && object.notBefore !== null + ? 
fromJsonTimestamp(object.notBefore)
+        : undefined;
+    message.notAfter =
+      object.notAfter !== undefined && object.notAfter !== null
+        ? fromJsonTimestamp(object.notAfter)
+        : undefined;
+    message.certificateId =
+      object.certificateId !== undefined && object.certificateId !== null
+        ? String(object.certificateId)
+        : "";
+    return message;
+  },
+
+  toJSON(message: HTTPSConfig): unknown {
+    const obj: any = {};
+    message.name !== undefined && (obj.name = message.name);
+    message.sourceType !== undefined &&
+      (obj.sourceType = hTTPSConfig_SourceTypeToJSON(message.sourceType));
+    message.issuer !== undefined && (obj.issuer = message.issuer);
+    message.subject !== undefined && (obj.subject = message.subject);
+    if (message.dnsNames) {
+      obj.dnsNames = message.dnsNames.map((e) => e);
+    } else {
+      obj.dnsNames = [];
+    }
+    message.notBefore !== undefined &&
+      (obj.notBefore = message.notBefore.toISOString());
+    message.notAfter !== undefined &&
+      (obj.notAfter = message.notAfter.toISOString());
+    message.certificateId !== undefined &&
+      (obj.certificateId = message.certificateId);
+    return obj;
+  },
+
+  fromPartial<I extends Exact<DeepPartial<HTTPSConfig>, I>>(
+    object: I
+  ): HTTPSConfig {
+    const message = { ...baseHTTPSConfig } as HTTPSConfig;
+    message.name = object.name ?? "";
+    message.sourceType = object.sourceType ?? 0;
+    message.issuer = object.issuer ?? undefined;
+    message.subject = object.subject ?? undefined;
+    message.dnsNames = object.dnsNames?.map((e) => e) || [];
+    message.notBefore = object.notBefore ?? undefined;
+    message.notAfter = object.notAfter ?? undefined;
+    message.certificateId = object.certificateId ?? "";
+    return message;
+  },
+};
+
+messageTypeRegistry.set(HTTPSConfig.$type, HTTPSConfig);
+
+declare var self: any | undefined;
+declare var window: any | undefined;
+declare var global: any | undefined;
+var globalThis: any = (() => {
+  if (typeof globalThis !== "undefined") return globalThis;
+  if (typeof self !== "undefined") return self;
+  if (typeof window !== "undefined") return window;
+  if (typeof global !== "undefined") return global;
+  throw "Unable to locate global object";
+})();
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+export type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Array<infer U>
+  ? Array<DeepPartial<U>>
+  : T extends ReadonlyArray<infer U>
+  ? ReadonlyArray<DeepPartial<U>>
+  : T extends {}
+  ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+  : Partial<T>;
+
+type KeysOfUnion<T> = T extends T ? keyof T : never;
+export type Exact<P, I extends P> = P extends Builtin
+  ? P
+  : P & { [K in keyof P]: Exact<P[K], I[K]> } & Record<
+        Exclude<keyof I, KeysOfUnion<P> | "$type">,
+        never
+      >;
+
+function toTimestamp(date: Date): Timestamp {
+  const seconds = date.getTime() / 1_000;
+  const nanos = (date.getTime() % 1_000) * 1_000_000;
+  return { $type: "google.protobuf.Timestamp", seconds, nanos };
+}
+
+function fromTimestamp(t: Timestamp): Date {
+  let millis = t.seconds * 1_000;
+  millis += t.nanos / 1_000_000;
+  return new Date(millis);
+}
+
+function fromJsonTimestamp(o: any): Date {
+  if (o instanceof Date) {
+    return o;
+  } else if (typeof o === "string") {
+    return new Date(o);
+  } else {
+    return fromTimestamp(Timestamp.fromJSON(o));
+  }
+}
+
+function longToNumber(long: Long): number {
+  if (long.gt(Number.MAX_SAFE_INTEGER)) {
+    throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER");
+  }
+  return long.toNumber();
+}
+
+if (_m0.util.Long !== Long) {
+  _m0.util.Long = Long as any;
+  _m0.configure();
+}
diff --git a/src/generated/yandex/cloud/storage/v1/bucket_service.ts b/src/generated/yandex/cloud/storage/v1/bucket_service.ts
new file mode 100644
index 00000000..5d1be9c6
--- /dev/null
+++ b/src/generated/yandex/cloud/storage/v1/bucket_service.ts
@@ -0,0 +1,2229 @@
+/* eslint-disable */
+import { messageTypeRegistry } from "../../../../typeRegistry";
+import Long from "long";
+import {
+  makeGenericClientConstructor,
+  ChannelCredentials,
+  ChannelOptions,
+  UntypedServiceImplementation,
+  handleUnaryCall,
+  Client,
+  ClientUnaryCall,
+  Metadata,
+  CallOptions,
+  ServiceError,
+} from "@grpc/grpc-js";
+import _m0 from "protobufjs/minimal";
+import {
+  AnonymousAccessFlags,
+  ACL,
+  WebsiteSettings,
+  Versioning,
+  Bucket,
+  CorsRule,
+  LifecycleRule,
+  BucketStats,
+  HTTPSConfig,
+  versioningFromJSON,
+  versioningToJSON,
+} from "../../../../yandex/cloud/storage/v1/bucket";
+import { FieldMask } from "../../../../google/protobuf/field_mask";
+import { Operation } from "../../../../yandex/cloud/operation/operation";
+import { Struct } from "../../../../google/protobuf/struct";
+
+export const protobufPackage = "yandex.cloud.storage.v1";
+
+export interface GetBucketRequest {
+  $type: "yandex.cloud.storage.v1.GetBucketRequest";
+  /**
+   * Name of the bucket to return.
+   *
+   * To get the bucket name, make a [BucketService.List] request.
+   */
+  name: string;
+  /**
+   * Scope of information about the bucket to return.
+   *
+   * Access to scopes is managed via [Identity and Access Management roles](/docs/storage/security),
+   * bucket [ACL](/docs/storage/concepts/acl) and [policies](/docs/storage/concepts/policy).
+   */
+  view: GetBucketRequest_View;
+}
+
+export enum GetBucketRequest_View {
+  VIEW_UNSPECIFIED = 0,
+  /**
+   * VIEW_BASIC - Returns basic information about a bucket.
+   *
+   * The following fields will _not_ be returned: [Bucket.acl], [Bucket.cors], [Bucket.website_settings],
+   * [Bucket.lifecycle_rules].
+   */
+  VIEW_BASIC = 1,
+  /**
+   * VIEW_ACL - Returns basic information and access control list (ACL) for the bucket.
+   *
+   * The following fields will _not_ be returned: [Bucket.cors], [Bucket.website_settings], [Bucket.lifecycle_rules].
+   */
+  VIEW_ACL = 2,
+  /** VIEW_FULL - Returns full information about a bucket.
*/ + VIEW_FULL = 3, + UNRECOGNIZED = -1, +} + +export function getBucketRequest_ViewFromJSON( + object: any +): GetBucketRequest_View { + switch (object) { + case 0: + case "VIEW_UNSPECIFIED": + return GetBucketRequest_View.VIEW_UNSPECIFIED; + case 1: + case "VIEW_BASIC": + return GetBucketRequest_View.VIEW_BASIC; + case 2: + case "VIEW_ACL": + return GetBucketRequest_View.VIEW_ACL; + case 3: + case "VIEW_FULL": + return GetBucketRequest_View.VIEW_FULL; + case -1: + case "UNRECOGNIZED": + default: + return GetBucketRequest_View.UNRECOGNIZED; + } +} + +export function getBucketRequest_ViewToJSON( + object: GetBucketRequest_View +): string { + switch (object) { + case GetBucketRequest_View.VIEW_UNSPECIFIED: + return "VIEW_UNSPECIFIED"; + case GetBucketRequest_View.VIEW_BASIC: + return "VIEW_BASIC"; + case GetBucketRequest_View.VIEW_ACL: + return "VIEW_ACL"; + case GetBucketRequest_View.VIEW_FULL: + return "VIEW_FULL"; + default: + return "UNKNOWN"; + } +} + +export interface ListBucketsRequest { + $type: "yandex.cloud.storage.v1.ListBucketsRequest"; + /** + * ID of the folder to list buckets in. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; +} + +export interface ListBucketsResponse { + $type: "yandex.cloud.storage.v1.ListBucketsResponse"; + /** List of buckets in the specified folder. */ + buckets: Bucket[]; +} + +export interface CreateBucketRequest { + $type: "yandex.cloud.storage.v1.CreateBucketRequest"; + /** + * Name of the bucket. + * + * The name must be unique within Yandex Cloud. For naming limitations and rules, see + * [documentation](/docs/storage/concepts/bucket#naming). + */ + name: string; + /** + * ID of the folder to create a bucket in. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`) and + * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). + */ + defaultStorageClass: string; + /** + * Maximum size of the bucket. + * For details, see [documentation](/docs/storage/operations/buckets/limit-max-volume). + */ + maxSize: number; + /** + * Flags for configuring public (anonymous) access to the bucket's content and settings. + * For details, see [documentation](/docs/storage/concepts/bucket#bucket-access). + */ + anonymousAccessFlags?: AnonymousAccessFlags; + /** + * Access control list (ACL) of the bucket. + * For details, see [documentation](/docs/storage/concepts/acl). + */ + acl?: ACL; +} + +export interface CreateBucketMetadata { + $type: "yandex.cloud.storage.v1.CreateBucketMetadata"; + /** Name of the bucket that is being created. */ + name: string; +} + +export interface UpdateBucketRequest { + $type: "yandex.cloud.storage.v1.UpdateBucketRequest"; + /** + * Name of the bucket to update. + * + * The name cannot be updated. + * + * To get the bucket name, make a [BucketService.List] request. + */ + name: string; + /** Field mask that specifies which attributes of the bucket should be updated. */ + fieldMask?: FieldMask; + /** + * Flags for configuring public (anonymous) access to the bucket's content and settings. + * For details, see [documentation](/docs/storage/concepts/bucket#bucket-access). + */ + anonymousAccessFlags?: AnonymousAccessFlags; + /** + * Default storage class for objects in the bucket. 
Supported classes are standard storage (`STANDARD`) and + * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * For details, see [documentation](/docs/storage/concepts/storage-class). + */ + defaultStorageClass: string; + /** + * Maximum size of the bucket, in bytes. + * For details, see [documentation](/docs/storage/operations/buckets/limit-max-volume). + */ + maxSize: number; + /** + * List of rules for cross-domain requests to objects in the bucket (cross-origin resource sharing, CORS). + * For details, see [documentation](/docs/storage/concepts/cors). + */ + cors: CorsRule[]; + /** + * Configuration for hosting a static website in the bucket. + * For details, see [documentation](/docs/storage/concepts/hosting). + */ + websiteSettings?: WebsiteSettings; + /** + * Bucket versioning status. + * For details, see [documentation](/docs/storage/concepts/versioning). + */ + versioning: Versioning; + /** + * List of object lifecycle rules for the bucket. + * For details, see [documentation](/docs/storage/concepts/lifecycles). + */ + lifecycleRules: LifecycleRule[]; + /** + * Bucket policies that set permissions for actions with the bucket, its objects, and groups of objects. + * For details, see [documentation](/docs/storage/concepts/policy). + */ + policy?: { [key: string]: any }; + /** + * Access control list (ACL) of the bucket. + * For details, see [documentation](/docs/storage/concepts/acl). + */ + acl?: ACL; +} + +export interface UpdateBucketMetadata { + $type: "yandex.cloud.storage.v1.UpdateBucketMetadata"; + /** Name of the bucket that is being updated. */ + name: string; +} + +/** DeleteBucketRequest deletes requested bucket from the Cloud. */ +export interface DeleteBucketRequest { + $type: "yandex.cloud.storage.v1.DeleteBucketRequest"; + /** + * Name of the bucket to update. + * + * To get the bucket name, make a [BucketService.List] request. + */ + name: string; +} + +export interface DeleteBucketMetadata { + $type: "yandex.cloud.storage.v1.DeleteBucketMetadata"; + /** Name of the bucket that is being deleted. */ + name: string; +} + +export interface GetBucketStatsRequest { + $type: "yandex.cloud.storage.v1.GetBucketStatsRequest"; + /** Name of the bucket to return the statistics for. */ + name: string; +} + +export interface GetBucketHTTPSConfigRequest { + $type: "yandex.cloud.storage.v1.GetBucketHTTPSConfigRequest"; + /** Name of the bucket to return the HTTPS configuration for. */ + name: string; +} + +export interface SelfManagedHTTPSConfigParams { + $type: "yandex.cloud.storage.v1.SelfManagedHTTPSConfigParams"; + /** [PEM](https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)-encoded certificate. */ + certificatePem: string; + /** [PEM](https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)-encoded private key for the certificate. */ + privateKeyPem: string; +} + +/** A resource for a TLS certificate from Yandex Certificate Manager. */ +export interface CertificateManagerHTTPSConfigParams { + $type: "yandex.cloud.storage.v1.CertificateManagerHTTPSConfigParams"; + /** + * ID of the certificate. + * + * To get the list of all available certificates, make a [yandex.cloud.certificatemanager.v1.CertificateService.List] + * request. + */ + certificateId: string; +} + +export interface SetBucketHTTPSConfigRequest { + $type: "yandex.cloud.storage.v1.SetBucketHTTPSConfigRequest"; + /** Name of the bucket to update the HTTPS configuration for. */ + name: string; + /** + * Your TLS certificate, uploaded directly. 
+ * + * Object Storage only supports [PEM](https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)-encoded certificates. + */ + selfManaged?: SelfManagedHTTPSConfigParams | undefined; + /** + * TLS certificate from Yandex Certificate Manager. + * + * To create a certificate in Certificate Manager, make a + * [yandex.cloud.certificatemanager.v1.CertificateService.Create] request. + */ + certificateManager?: CertificateManagerHTTPSConfigParams | undefined; +} + +export interface SetBucketHTTPSConfigMetadata { + $type: "yandex.cloud.storage.v1.SetBucketHTTPSConfigMetadata"; + /** Name of the bucket the HTTPS configuration is being updated for. */ + name: string; +} + +export interface DeleteBucketHTTPSConfigRequest { + $type: "yandex.cloud.storage.v1.DeleteBucketHTTPSConfigRequest"; + /** Name of the bucket to delete the HTTPS configuration for. */ + name: string; +} + +export interface DeleteBucketHTTPSConfigMetadata { + $type: "yandex.cloud.storage.v1.DeleteBucketHTTPSConfigMetadata"; + /** Name of the bucket the HTTPS configuration is being deleted for. */ + name: string; +} + +const baseGetBucketRequest: object = { + $type: "yandex.cloud.storage.v1.GetBucketRequest", + name: "", + view: 0, +}; + +export const GetBucketRequest = { + $type: "yandex.cloud.storage.v1.GetBucketRequest" as const, + + encode( + message: GetBucketRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.view !== 0) { + writer.uint32(16).int32(message.view); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBucketRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBucketRequest } as GetBucketRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.view = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBucketRequest { + const message = { ...baseGetBucketRequest } as GetBucketRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.view = + object.view !== undefined && object.view !== null + ? getBucketRequest_ViewFromJSON(object.view) + : 0; + return message; + }, + + toJSON(message: GetBucketRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.view !== undefined && + (obj.view = getBucketRequest_ViewToJSON(message.view)); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBucketRequest { + const message = { ...baseGetBucketRequest } as GetBucketRequest; + message.name = object.name ?? ""; + message.view = object.view ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(GetBucketRequest.$type, GetBucketRequest); + +const baseListBucketsRequest: object = { + $type: "yandex.cloud.storage.v1.ListBucketsRequest", + folderId: "", +}; + +export const ListBucketsRequest = { + $type: "yandex.cloud.storage.v1.ListBucketsRequest" as const, + + encode( + message: ListBucketsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBucketsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBucketsRequest } as ListBucketsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBucketsRequest { + const message = { ...baseListBucketsRequest } as ListBucketsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + return message; + }, + + toJSON(message: ListBucketsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBucketsRequest { + const message = { ...baseListBucketsRequest } as ListBucketsRequest; + message.folderId = object.folderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBucketsRequest.$type, ListBucketsRequest); + +const baseListBucketsResponse: object = { + $type: "yandex.cloud.storage.v1.ListBucketsResponse", +}; + +export const ListBucketsResponse = { + $type: "yandex.cloud.storage.v1.ListBucketsResponse" as const, + + encode( + message: ListBucketsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.buckets) { + Bucket.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBucketsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBucketsResponse } as ListBucketsResponse; + message.buckets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.buckets.push(Bucket.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBucketsResponse { + const message = { ...baseListBucketsResponse } as ListBucketsResponse; + message.buckets = (object.buckets ?? []).map((e: any) => + Bucket.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListBucketsResponse): unknown { + const obj: any = {}; + if (message.buckets) { + obj.buckets = message.buckets.map((e) => + e ? 
Bucket.toJSON(e) : undefined + ); + } else { + obj.buckets = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBucketsResponse { + const message = { ...baseListBucketsResponse } as ListBucketsResponse; + message.buckets = object.buckets?.map((e) => Bucket.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(ListBucketsResponse.$type, ListBucketsResponse); + +const baseCreateBucketRequest: object = { + $type: "yandex.cloud.storage.v1.CreateBucketRequest", + name: "", + folderId: "", + defaultStorageClass: "", + maxSize: 0, +}; + +export const CreateBucketRequest = { + $type: "yandex.cloud.storage.v1.CreateBucketRequest" as const, + + encode( + message: CreateBucketRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.defaultStorageClass !== "") { + writer.uint32(34).string(message.defaultStorageClass); + } + if (message.maxSize !== 0) { + writer.uint32(40).int64(message.maxSize); + } + if (message.anonymousAccessFlags !== undefined) { + AnonymousAccessFlags.encode( + message.anonymousAccessFlags, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.acl !== undefined) { + ACL.encode(message.acl, writer.uint32(58).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateBucketRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateBucketRequest } as CreateBucketRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 4: + message.defaultStorageClass = reader.string(); + break; + case 5: + message.maxSize = longToNumber(reader.int64() as Long); + break; + case 6: + message.anonymousAccessFlags = AnonymousAccessFlags.decode( + reader, + reader.uint32() + ); + break; + case 7: + message.acl = ACL.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateBucketRequest { + const message = { ...baseCreateBucketRequest } as CreateBucketRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.defaultStorageClass = + object.defaultStorageClass !== undefined && + object.defaultStorageClass !== null + ? String(object.defaultStorageClass) + : ""; + message.maxSize = + object.maxSize !== undefined && object.maxSize !== null + ? Number(object.maxSize) + : 0; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? AnonymousAccessFlags.fromJSON(object.anonymousAccessFlags) + : undefined; + message.acl = + object.acl !== undefined && object.acl !== null + ? 
ACL.fromJSON(object.acl) + : undefined; + return message; + }, + + toJSON(message: CreateBucketRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.defaultStorageClass !== undefined && + (obj.defaultStorageClass = message.defaultStorageClass); + message.maxSize !== undefined && + (obj.maxSize = Math.round(message.maxSize)); + message.anonymousAccessFlags !== undefined && + (obj.anonymousAccessFlags = message.anonymousAccessFlags + ? AnonymousAccessFlags.toJSON(message.anonymousAccessFlags) + : undefined); + message.acl !== undefined && + (obj.acl = message.acl ? ACL.toJSON(message.acl) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateBucketRequest { + const message = { ...baseCreateBucketRequest } as CreateBucketRequest; + message.name = object.name ?? ""; + message.folderId = object.folderId ?? ""; + message.defaultStorageClass = object.defaultStorageClass ?? ""; + message.maxSize = object.maxSize ?? 0; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? AnonymousAccessFlags.fromPartial(object.anonymousAccessFlags) + : undefined; + message.acl = + object.acl !== undefined && object.acl !== null + ? ACL.fromPartial(object.acl) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateBucketRequest.$type, CreateBucketRequest); + +const baseCreateBucketMetadata: object = { + $type: "yandex.cloud.storage.v1.CreateBucketMetadata", + name: "", +}; + +export const CreateBucketMetadata = { + $type: "yandex.cloud.storage.v1.CreateBucketMetadata" as const, + + encode( + message: CreateBucketMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateBucketMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateBucketMetadata } as CreateBucketMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateBucketMetadata { + const message = { ...baseCreateBucketMetadata } as CreateBucketMetadata; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: CreateBucketMetadata): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateBucketMetadata { + const message = { ...baseCreateBucketMetadata } as CreateBucketMetadata; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(CreateBucketMetadata.$type, CreateBucketMetadata); + +const baseUpdateBucketRequest: object = { + $type: "yandex.cloud.storage.v1.UpdateBucketRequest", + name: "", + defaultStorageClass: "", + maxSize: 0, + versioning: 0, +}; + +export const UpdateBucketRequest = { + $type: "yandex.cloud.storage.v1.UpdateBucketRequest" as const, + + encode( + message: UpdateBucketRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.fieldMask !== undefined) { + FieldMask.encode(message.fieldMask, writer.uint32(18).fork()).ldelim(); + } + if (message.anonymousAccessFlags !== undefined) { + AnonymousAccessFlags.encode( + message.anonymousAccessFlags, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.defaultStorageClass !== "") { + writer.uint32(34).string(message.defaultStorageClass); + } + if (message.maxSize !== 0) { + writer.uint32(40).int64(message.maxSize); + } + for (const v of message.cors) { + CorsRule.encode(v!, writer.uint32(50).fork()).ldelim(); + } + if (message.websiteSettings !== undefined) { + WebsiteSettings.encode( + message.websiteSettings, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.versioning !== 0) { + writer.uint32(64).int32(message.versioning); + } + for (const v of message.lifecycleRules) { + LifecycleRule.encode(v!, writer.uint32(74).fork()).ldelim(); + } + if (message.policy !== undefined) { + Struct.encode( + Struct.wrap(message.policy), + writer.uint32(82).fork() + ).ldelim(); + } + if (message.acl !== undefined) { + ACL.encode(message.acl, writer.uint32(90).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateBucketRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateBucketRequest } as UpdateBucketRequest; + message.cors = []; + message.lifecycleRules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.fieldMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.anonymousAccessFlags = AnonymousAccessFlags.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.defaultStorageClass = reader.string(); + break; + case 5: + message.maxSize = longToNumber(reader.int64() as Long); + break; + case 6: + message.cors.push(CorsRule.decode(reader, reader.uint32())); + break; + case 7: + message.websiteSettings = WebsiteSettings.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.versioning = reader.int32() as any; + break; + case 9: + message.lifecycleRules.push( + LifecycleRule.decode(reader, reader.uint32()) + ); + break; + case 10: + message.policy = Struct.unwrap( + Struct.decode(reader, reader.uint32()) + ); + break; + case 11: + message.acl = ACL.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateBucketRequest { + const message = { ...baseUpdateBucketRequest } as UpdateBucketRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.fieldMask = + object.fieldMask !== undefined && object.fieldMask !== null + ? 
FieldMask.fromJSON(object.fieldMask) + : undefined; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? AnonymousAccessFlags.fromJSON(object.anonymousAccessFlags) + : undefined; + message.defaultStorageClass = + object.defaultStorageClass !== undefined && + object.defaultStorageClass !== null + ? String(object.defaultStorageClass) + : ""; + message.maxSize = + object.maxSize !== undefined && object.maxSize !== null + ? Number(object.maxSize) + : 0; + message.cors = (object.cors ?? []).map((e: any) => CorsRule.fromJSON(e)); + message.websiteSettings = + object.websiteSettings !== undefined && object.websiteSettings !== null + ? WebsiteSettings.fromJSON(object.websiteSettings) + : undefined; + message.versioning = + object.versioning !== undefined && object.versioning !== null + ? versioningFromJSON(object.versioning) + : 0; + message.lifecycleRules = (object.lifecycleRules ?? []).map((e: any) => + LifecycleRule.fromJSON(e) + ); + message.policy = + typeof object.policy === "object" ? object.policy : undefined; + message.acl = + object.acl !== undefined && object.acl !== null + ? ACL.fromJSON(object.acl) + : undefined; + return message; + }, + + toJSON(message: UpdateBucketRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.fieldMask !== undefined && + (obj.fieldMask = message.fieldMask + ? FieldMask.toJSON(message.fieldMask) + : undefined); + message.anonymousAccessFlags !== undefined && + (obj.anonymousAccessFlags = message.anonymousAccessFlags + ? AnonymousAccessFlags.toJSON(message.anonymousAccessFlags) + : undefined); + message.defaultStorageClass !== undefined && + (obj.defaultStorageClass = message.defaultStorageClass); + message.maxSize !== undefined && + (obj.maxSize = Math.round(message.maxSize)); + if (message.cors) { + obj.cors = message.cors.map((e) => (e ? CorsRule.toJSON(e) : undefined)); + } else { + obj.cors = []; + } + message.websiteSettings !== undefined && + (obj.websiteSettings = message.websiteSettings + ? WebsiteSettings.toJSON(message.websiteSettings) + : undefined); + message.versioning !== undefined && + (obj.versioning = versioningToJSON(message.versioning)); + if (message.lifecycleRules) { + obj.lifecycleRules = message.lifecycleRules.map((e) => + e ? LifecycleRule.toJSON(e) : undefined + ); + } else { + obj.lifecycleRules = []; + } + message.policy !== undefined && (obj.policy = message.policy); + message.acl !== undefined && + (obj.acl = message.acl ? ACL.toJSON(message.acl) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateBucketRequest { + const message = { ...baseUpdateBucketRequest } as UpdateBucketRequest; + message.name = object.name ?? ""; + message.fieldMask = + object.fieldMask !== undefined && object.fieldMask !== null + ? FieldMask.fromPartial(object.fieldMask) + : undefined; + message.anonymousAccessFlags = + object.anonymousAccessFlags !== undefined && + object.anonymousAccessFlags !== null + ? AnonymousAccessFlags.fromPartial(object.anonymousAccessFlags) + : undefined; + message.defaultStorageClass = object.defaultStorageClass ?? ""; + message.maxSize = object.maxSize ?? 0; + message.cors = object.cors?.map((e) => CorsRule.fromPartial(e)) || []; + message.websiteSettings = + object.websiteSettings !== undefined && object.websiteSettings !== null + ? WebsiteSettings.fromPartial(object.websiteSettings) + : undefined; + message.versioning = object.versioning ?? 
0; + message.lifecycleRules = + object.lifecycleRules?.map((e) => LifecycleRule.fromPartial(e)) || []; + message.policy = object.policy ?? undefined; + message.acl = + object.acl !== undefined && object.acl !== null + ? ACL.fromPartial(object.acl) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateBucketRequest.$type, UpdateBucketRequest); + +const baseUpdateBucketMetadata: object = { + $type: "yandex.cloud.storage.v1.UpdateBucketMetadata", + name: "", +}; + +export const UpdateBucketMetadata = { + $type: "yandex.cloud.storage.v1.UpdateBucketMetadata" as const, + + encode( + message: UpdateBucketMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateBucketMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateBucketMetadata } as UpdateBucketMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateBucketMetadata { + const message = { ...baseUpdateBucketMetadata } as UpdateBucketMetadata; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: UpdateBucketMetadata): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateBucketMetadata { + const message = { ...baseUpdateBucketMetadata } as UpdateBucketMetadata; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateBucketMetadata.$type, UpdateBucketMetadata); + +const baseDeleteBucketRequest: object = { + $type: "yandex.cloud.storage.v1.DeleteBucketRequest", + name: "", +}; + +export const DeleteBucketRequest = { + $type: "yandex.cloud.storage.v1.DeleteBucketRequest" as const, + + encode( + message: DeleteBucketRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteBucketRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBucketRequest } as DeleteBucketRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBucketRequest { + const message = { ...baseDeleteBucketRequest } as DeleteBucketRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteBucketRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBucketRequest { + const message = { ...baseDeleteBucketRequest } as DeleteBucketRequest; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBucketRequest.$type, DeleteBucketRequest); + +const baseDeleteBucketMetadata: object = { + $type: "yandex.cloud.storage.v1.DeleteBucketMetadata", + name: "", +}; + +export const DeleteBucketMetadata = { + $type: "yandex.cloud.storage.v1.DeleteBucketMetadata" as const, + + encode( + message: DeleteBucketMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBucketMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBucketMetadata } as DeleteBucketMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBucketMetadata { + const message = { ...baseDeleteBucketMetadata } as DeleteBucketMetadata; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteBucketMetadata): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBucketMetadata { + const message = { ...baseDeleteBucketMetadata } as DeleteBucketMetadata; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBucketMetadata.$type, DeleteBucketMetadata); + +const baseGetBucketStatsRequest: object = { + $type: "yandex.cloud.storage.v1.GetBucketStatsRequest", + name: "", +}; + +export const GetBucketStatsRequest = { + $type: "yandex.cloud.storage.v1.GetBucketStatsRequest" as const, + + encode( + message: GetBucketStatsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetBucketStatsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBucketStatsRequest } as GetBucketStatsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBucketStatsRequest { + const message = { ...baseGetBucketStatsRequest } as GetBucketStatsRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: GetBucketStatsRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBucketStatsRequest { + const message = { ...baseGetBucketStatsRequest } as GetBucketStatsRequest; + message.name = object.name ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetBucketStatsRequest.$type, GetBucketStatsRequest); + +const baseGetBucketHTTPSConfigRequest: object = { + $type: "yandex.cloud.storage.v1.GetBucketHTTPSConfigRequest", + name: "", +}; + +export const GetBucketHTTPSConfigRequest = { + $type: "yandex.cloud.storage.v1.GetBucketHTTPSConfigRequest" as const, + + encode( + message: GetBucketHTTPSConfigRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetBucketHTTPSConfigRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseGetBucketHTTPSConfigRequest, + } as GetBucketHTTPSConfigRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBucketHTTPSConfigRequest { + const message = { + ...baseGetBucketHTTPSConfigRequest, + } as GetBucketHTTPSConfigRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: GetBucketHTTPSConfigRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBucketHTTPSConfigRequest { + const message = { + ...baseGetBucketHTTPSConfigRequest, + } as GetBucketHTTPSConfigRequest; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GetBucketHTTPSConfigRequest.$type, + GetBucketHTTPSConfigRequest +); + +const baseSelfManagedHTTPSConfigParams: object = { + $type: "yandex.cloud.storage.v1.SelfManagedHTTPSConfigParams", + certificatePem: "", + privateKeyPem: "", +}; + +export const SelfManagedHTTPSConfigParams = { + $type: "yandex.cloud.storage.v1.SelfManagedHTTPSConfigParams" as const, + + encode( + message: SelfManagedHTTPSConfigParams, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.certificatePem !== "") { + writer.uint32(10).string(message.certificatePem); + } + if (message.privateKeyPem !== "") { + writer.uint32(18).string(message.privateKeyPem); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SelfManagedHTTPSConfigParams { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSelfManagedHTTPSConfigParams, + } as SelfManagedHTTPSConfigParams; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.certificatePem = reader.string(); + break; + case 2: + message.privateKeyPem = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SelfManagedHTTPSConfigParams { + const message = { + ...baseSelfManagedHTTPSConfigParams, + } as SelfManagedHTTPSConfigParams; + message.certificatePem = + object.certificatePem !== undefined && object.certificatePem !== null + ? String(object.certificatePem) + : ""; + message.privateKeyPem = + object.privateKeyPem !== undefined && object.privateKeyPem !== null + ? 
String(object.privateKeyPem) + : ""; + return message; + }, + + toJSON(message: SelfManagedHTTPSConfigParams): unknown { + const obj: any = {}; + message.certificatePem !== undefined && + (obj.certificatePem = message.certificatePem); + message.privateKeyPem !== undefined && + (obj.privateKeyPem = message.privateKeyPem); + return obj; + }, + + fromPartial, I>>( + object: I + ): SelfManagedHTTPSConfigParams { + const message = { + ...baseSelfManagedHTTPSConfigParams, + } as SelfManagedHTTPSConfigParams; + message.certificatePem = object.certificatePem ?? ""; + message.privateKeyPem = object.privateKeyPem ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + SelfManagedHTTPSConfigParams.$type, + SelfManagedHTTPSConfigParams +); + +const baseCertificateManagerHTTPSConfigParams: object = { + $type: "yandex.cloud.storage.v1.CertificateManagerHTTPSConfigParams", + certificateId: "", +}; + +export const CertificateManagerHTTPSConfigParams = { + $type: "yandex.cloud.storage.v1.CertificateManagerHTTPSConfigParams" as const, + + encode( + message: CertificateManagerHTTPSConfigParams, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.certificateId !== "") { + writer.uint32(10).string(message.certificateId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CertificateManagerHTTPSConfigParams { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCertificateManagerHTTPSConfigParams, + } as CertificateManagerHTTPSConfigParams; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.certificateId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CertificateManagerHTTPSConfigParams { + const message = { + ...baseCertificateManagerHTTPSConfigParams, + } as CertificateManagerHTTPSConfigParams; + message.certificateId = + object.certificateId !== undefined && object.certificateId !== null + ? String(object.certificateId) + : ""; + return message; + }, + + toJSON(message: CertificateManagerHTTPSConfigParams): unknown { + const obj: any = {}; + message.certificateId !== undefined && + (obj.certificateId = message.certificateId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CertificateManagerHTTPSConfigParams { + const message = { + ...baseCertificateManagerHTTPSConfigParams, + } as CertificateManagerHTTPSConfigParams; + message.certificateId = object.certificateId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + CertificateManagerHTTPSConfigParams.$type, + CertificateManagerHTTPSConfigParams +); + +const baseSetBucketHTTPSConfigRequest: object = { + $type: "yandex.cloud.storage.v1.SetBucketHTTPSConfigRequest", + name: "", +}; + +export const SetBucketHTTPSConfigRequest = { + $type: "yandex.cloud.storage.v1.SetBucketHTTPSConfigRequest" as const, + + encode( + message: SetBucketHTTPSConfigRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.selfManaged !== undefined) { + SelfManagedHTTPSConfigParams.encode( + message.selfManaged, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.certificateManager !== undefined) { + CertificateManagerHTTPSConfigParams.encode( + message.certificateManager, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SetBucketHTTPSConfigRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSetBucketHTTPSConfigRequest, + } as SetBucketHTTPSConfigRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.selfManaged = SelfManagedHTTPSConfigParams.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.certificateManager = + CertificateManagerHTTPSConfigParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SetBucketHTTPSConfigRequest { + const message = { + ...baseSetBucketHTTPSConfigRequest, + } as SetBucketHTTPSConfigRequest; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.selfManaged = + object.selfManaged !== undefined && object.selfManaged !== null + ? SelfManagedHTTPSConfigParams.fromJSON(object.selfManaged) + : undefined; + message.certificateManager = + object.certificateManager !== undefined && + object.certificateManager !== null + ? CertificateManagerHTTPSConfigParams.fromJSON( + object.certificateManager + ) + : undefined; + return message; + }, + + toJSON(message: SetBucketHTTPSConfigRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.selfManaged !== undefined && + (obj.selfManaged = message.selfManaged + ? SelfManagedHTTPSConfigParams.toJSON(message.selfManaged) + : undefined); + message.certificateManager !== undefined && + (obj.certificateManager = message.certificateManager + ? CertificateManagerHTTPSConfigParams.toJSON(message.certificateManager) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SetBucketHTTPSConfigRequest { + const message = { + ...baseSetBucketHTTPSConfigRequest, + } as SetBucketHTTPSConfigRequest; + message.name = object.name ?? ""; + message.selfManaged = + object.selfManaged !== undefined && object.selfManaged !== null + ? SelfManagedHTTPSConfigParams.fromPartial(object.selfManaged) + : undefined; + message.certificateManager = + object.certificateManager !== undefined && + object.certificateManager !== null + ? 
CertificateManagerHTTPSConfigParams.fromPartial( + object.certificateManager + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + SetBucketHTTPSConfigRequest.$type, + SetBucketHTTPSConfigRequest +); + +const baseSetBucketHTTPSConfigMetadata: object = { + $type: "yandex.cloud.storage.v1.SetBucketHTTPSConfigMetadata", + name: "", +}; + +export const SetBucketHTTPSConfigMetadata = { + $type: "yandex.cloud.storage.v1.SetBucketHTTPSConfigMetadata" as const, + + encode( + message: SetBucketHTTPSConfigMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SetBucketHTTPSConfigMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSetBucketHTTPSConfigMetadata, + } as SetBucketHTTPSConfigMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SetBucketHTTPSConfigMetadata { + const message = { + ...baseSetBucketHTTPSConfigMetadata, + } as SetBucketHTTPSConfigMetadata; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: SetBucketHTTPSConfigMetadata): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): SetBucketHTTPSConfigMetadata { + const message = { + ...baseSetBucketHTTPSConfigMetadata, + } as SetBucketHTTPSConfigMetadata; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + SetBucketHTTPSConfigMetadata.$type, + SetBucketHTTPSConfigMetadata +); + +const baseDeleteBucketHTTPSConfigRequest: object = { + $type: "yandex.cloud.storage.v1.DeleteBucketHTTPSConfigRequest", + name: "", +}; + +export const DeleteBucketHTTPSConfigRequest = { + $type: "yandex.cloud.storage.v1.DeleteBucketHTTPSConfigRequest" as const, + + encode( + message: DeleteBucketHTTPSConfigRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBucketHTTPSConfigRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteBucketHTTPSConfigRequest, + } as DeleteBucketHTTPSConfigRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBucketHTTPSConfigRequest { + const message = { + ...baseDeleteBucketHTTPSConfigRequest, + } as DeleteBucketHTTPSConfigRequest; + message.name = + object.name !== undefined && object.name !== null + ? 
String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteBucketHTTPSConfigRequest): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBucketHTTPSConfigRequest { + const message = { + ...baseDeleteBucketHTTPSConfigRequest, + } as DeleteBucketHTTPSConfigRequest; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteBucketHTTPSConfigRequest.$type, + DeleteBucketHTTPSConfigRequest +); + +const baseDeleteBucketHTTPSConfigMetadata: object = { + $type: "yandex.cloud.storage.v1.DeleteBucketHTTPSConfigMetadata", + name: "", +}; + +export const DeleteBucketHTTPSConfigMetadata = { + $type: "yandex.cloud.storage.v1.DeleteBucketHTTPSConfigMetadata" as const, + + encode( + message: DeleteBucketHTTPSConfigMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBucketHTTPSConfigMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteBucketHTTPSConfigMetadata, + } as DeleteBucketHTTPSConfigMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBucketHTTPSConfigMetadata { + const message = { + ...baseDeleteBucketHTTPSConfigMetadata, + } as DeleteBucketHTTPSConfigMetadata; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + return message; + }, + + toJSON(message: DeleteBucketHTTPSConfigMetadata): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBucketHTTPSConfigMetadata { + const message = { + ...baseDeleteBucketHTTPSConfigMetadata, + } as DeleteBucketHTTPSConfigMetadata; + message.name = object.name ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteBucketHTTPSConfigMetadata.$type, + DeleteBucketHTTPSConfigMetadata +); + +/** A set of methods for managing buckets. */ +export const BucketServiceService = { + /** + * Retrieves the list of buckets in the specified folder. + * + * The following fields will not be returned for buckets in the list: [Bucket.policy], [Bucket.acl], [Bucket.cors], + * [Bucket.website_settings], [Bucket.lifecycle_rules]. + */ + list: { + path: "/yandex.cloud.storage.v1.BucketService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBucketsRequest) => + Buffer.from(ListBucketsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBucketsRequest.decode(value), + responseSerialize: (value: ListBucketsResponse) => + Buffer.from(ListBucketsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBucketsResponse.decode(value), + }, + /** + * Returns the specified bucket. + * + * To get the list of all available buckets, make a [List] request. 
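+   * A minimal offline round-trip sketch using only the codecs generated in this
+   * file (the bucket name is a placeholder, no network involved):
+   *
+   *   const bytes = GetBucketRequest.encode(
+   *     GetBucketRequest.fromPartial({ name: "my-bucket" })
+   *   ).finish();
+   *   const restored = GetBucketRequest.decode(bytes);
+   *   // restored.name === "my-bucket"; unset fields keep their proto3 defaults
+   *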
+ */ + get: { + path: "/yandex.cloud.storage.v1.BucketService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBucketRequest) => + Buffer.from(GetBucketRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBucketRequest.decode(value), + responseSerialize: (value: Bucket) => + Buffer.from(Bucket.encode(value).finish()), + responseDeserialize: (value: Buffer) => Bucket.decode(value), + }, + /** Creates a bucket in the specified folder. */ + create: { + path: "/yandex.cloud.storage.v1.BucketService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateBucketRequest) => + Buffer.from(CreateBucketRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateBucketRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Updates the specified bucket. + * + * In most cases, `storage.editor` role (see [documentation](/docs/storage/security/#storage-editor)) should be enough + * to update a bucket, subject to its [policy](/docs/storage/concepts/policy). + */ + update: { + path: "/yandex.cloud.storage.v1.BucketService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateBucketRequest) => + Buffer.from(UpdateBucketRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateBucketRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified bucket. */ + delete: { + path: "/yandex.cloud.storage.v1.BucketService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBucketRequest) => + Buffer.from(DeleteBucketRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteBucketRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns the statistics for the specified bucket. */ + getStats: { + path: "/yandex.cloud.storage.v1.BucketService/GetStats", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBucketStatsRequest) => + Buffer.from(GetBucketStatsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBucketStatsRequest.decode(value), + responseSerialize: (value: BucketStats) => + Buffer.from(BucketStats.encode(value).finish()), + responseDeserialize: (value: Buffer) => BucketStats.decode(value), + }, + /** Returns the HTTPS configuration for the specified bucket. */ + getHTTPSConfig: { + path: "/yandex.cloud.storage.v1.BucketService/GetHTTPSConfig", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBucketHTTPSConfigRequest) => + Buffer.from(GetBucketHTTPSConfigRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetBucketHTTPSConfigRequest.decode(value), + responseSerialize: (value: HTTPSConfig) => + Buffer.from(HTTPSConfig.encode(value).finish()), + responseDeserialize: (value: Buffer) => HTTPSConfig.decode(value), + }, + /** + * Updates the HTTPS configuration for the specified bucket. + * + * The updated configuration could take up to 30 minutes to apply to the bucket. 
+ */ + setHTTPSConfig: { + path: "/yandex.cloud.storage.v1.BucketService/SetHTTPSConfig", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetBucketHTTPSConfigRequest) => + Buffer.from(SetBucketHTTPSConfigRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetBucketHTTPSConfigRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the HTTPS configuration for the specified bucket. */ + deleteHTTPSConfig: { + path: "/yandex.cloud.storage.v1.BucketService/DeleteHTTPSConfig", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBucketHTTPSConfigRequest) => + Buffer.from(DeleteBucketHTTPSConfigRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteBucketHTTPSConfigRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface BucketServiceServer extends UntypedServiceImplementation { + /** + * Retrieves the list of buckets in the specified folder. + * + * The following fields will not be returned for buckets in the list: [Bucket.policy], [Bucket.acl], [Bucket.cors], + * [Bucket.website_settings], [Bucket.lifecycle_rules]. + */ + list: handleUnaryCall; + /** + * Returns the specified bucket. + * + * To get the list of all available buckets, make a [List] request. + */ + get: handleUnaryCall; + /** Creates a bucket in the specified folder. */ + create: handleUnaryCall; + /** + * Updates the specified bucket. + * + * In most cases, `storage.editor` role (see [documentation](/docs/storage/security/#storage-editor)) should be enough + * to update a bucket, subject to its [policy](/docs/storage/concepts/policy). + */ + update: handleUnaryCall; + /** Deletes the specified bucket. */ + delete: handleUnaryCall; + /** Returns the statistics for the specified bucket. */ + getStats: handleUnaryCall; + /** Returns the HTTPS configuration for the specified bucket. */ + getHTTPSConfig: handleUnaryCall; + /** + * Updates the HTTPS configuration for the specified bucket. + * + * The updated configuration could take up to 30 minutes to apply to the bucket. + */ + setHTTPSConfig: handleUnaryCall; + /** Deletes the HTTPS configuration for the specified bucket. */ + deleteHTTPSConfig: handleUnaryCall; +} + +export interface BucketServiceClient extends Client { + /** + * Retrieves the list of buckets in the specified folder. + * + * The following fields will not be returned for buckets in the list: [Bucket.policy], [Bucket.acl], [Bucket.cors], + * [Bucket.website_settings], [Bucket.lifecycle_rules]. + */ + list( + request: ListBucketsRequest, + callback: ( + error: ServiceError | null, + response: ListBucketsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBucketsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBucketsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBucketsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBucketsResponse + ) => void + ): ClientUnaryCall; + /** + * Returns the specified bucket. + * + * To get the list of all available buckets, make a [List] request. 
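+   * Rough usage sketch with a raw @grpc/grpc-js client; the endpoint shown and
+   * the way the IAM token is obtained are assumptions, not part of this file
+   * (the SDK's Session / service factory normally handles both):
+   *
+   *   const client = new BucketServiceClient(
+   *     "storage.api.cloud.yandex.net:443",           // assumed endpoint
+   *     ChannelCredentials.createSsl()
+   *   );
+   *   const md = new Metadata();
+   *   md.add("authorization", "Bearer " + iamToken);  // token obtained elsewhere
+   *   client.get(
+   *     GetBucketRequest.fromPartial({ name: "my-bucket" }),
+   *     md,
+   *     (err, bucket) => { if (!err) console.log(bucket.name); }
+   *   );
+   *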
+ */ + get( + request: GetBucketRequest, + callback: (error: ServiceError | null, response: Bucket) => void + ): ClientUnaryCall; + get( + request: GetBucketRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Bucket) => void + ): ClientUnaryCall; + get( + request: GetBucketRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Bucket) => void + ): ClientUnaryCall; + /** Creates a bucket in the specified folder. */ + create( + request: CreateBucketRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateBucketRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateBucketRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Updates the specified bucket. + * + * In most cases, `storage.editor` role (see [documentation](/docs/storage/security/#storage-editor)) should be enough + * to update a bucket, subject to its [policy](/docs/storage/concepts/policy). + */ + update( + request: UpdateBucketRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateBucketRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateBucketRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified bucket. */ + delete( + request: DeleteBucketRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBucketRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBucketRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns the statistics for the specified bucket. */ + getStats( + request: GetBucketStatsRequest, + callback: (error: ServiceError | null, response: BucketStats) => void + ): ClientUnaryCall; + getStats( + request: GetBucketStatsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: BucketStats) => void + ): ClientUnaryCall; + getStats( + request: GetBucketStatsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: BucketStats) => void + ): ClientUnaryCall; + /** Returns the HTTPS configuration for the specified bucket. */ + getHTTPSConfig( + request: GetBucketHTTPSConfigRequest, + callback: (error: ServiceError | null, response: HTTPSConfig) => void + ): ClientUnaryCall; + getHTTPSConfig( + request: GetBucketHTTPSConfigRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: HTTPSConfig) => void + ): ClientUnaryCall; + getHTTPSConfig( + request: GetBucketHTTPSConfigRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: HTTPSConfig) => void + ): ClientUnaryCall; + /** + * Updates the HTTPS configuration for the specified bucket. + * + * The updated configuration could take up to 30 minutes to apply to the bucket. 
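+   * Sketch of building the request with the helpers defined in this file; the
+   * certificate ID is a placeholder, and `selfManaged` / `certificateManager`
+   * are alternative ways to supply the certificate (set only one of them):
+   *
+   *   const request = SetBucketHTTPSConfigRequest.fromPartial({
+   *     name: "my-bucket",
+   *     certificateManager: { certificateId: "<certificate-id>" },
+   *   });
+   *   // then pass it to setHTTPSConfig(...) as declared just below
+   *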
+ */ + setHTTPSConfig( + request: SetBucketHTTPSConfigRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setHTTPSConfig( + request: SetBucketHTTPSConfigRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setHTTPSConfig( + request: SetBucketHTTPSConfigRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the HTTPS configuration for the specified bucket. */ + deleteHTTPSConfig( + request: DeleteBucketHTTPSConfigRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteHTTPSConfig( + request: DeleteBucketHTTPSConfigRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteHTTPSConfig( + request: DeleteBucketHTTPSConfigRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const BucketServiceClient = makeGenericClientConstructor( + BucketServiceService, + "yandex.cloud.storage.v1.BucketService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BucketServiceClient; + service: typeof BucketServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/vpc/index.ts b/src/generated/yandex/cloud/vpc/index.ts index 882656b3..729ef520 100644 --- a/src/generated/yandex/cloud/vpc/index.ts +++ b/src/generated/yandex/cloud/vpc/index.ts @@ -1,5 +1,7 @@ export * as address from './v1/address' export * as address_service from './v1/address_service' +export * as gateway from './v1/gateway' +export * as gateway_service from './v1/gateway_service' export * as network from './v1/network' export * as network_service from './v1/network_service' export * as route_table from './v1/route_table' diff --git a/src/generated/yandex/cloud/vpc/v1/gateway.ts b/src/generated/yandex/cloud/vpc/v1/gateway.ts new file mode 100644 index 00000000..254e9774 --- /dev/null +++ b/src/generated/yandex/cloud/vpc/v1/gateway.ts @@ -0,0 +1,394 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.vpc.v1"; + +/** A Gateway resource. For more information, see [Gateway](/docs/vpc/concepts/gateway). */ +export interface Gateway { + $type: "yandex.cloud.vpc.v1.Gateway"; + /** ID of the gateway. Generated at creation time. */ + id: string; + /** ID of the folder that the gateway belongs to. */ + folderId: string; + /** Creation timestamp. */ + createdAt?: Date; + /** + * Name of the gateway. + * The name is unique within the folder. + */ + name: string; + /** Description of the gateway. */ + description: string; + /** Resource labels as `key:value` pairs. 
*/ + labels: { [key: string]: string }; + sharedEgressGateway?: SharedEgressGateway | undefined; +} + +export interface Gateway_LabelsEntry { + $type: "yandex.cloud.vpc.v1.Gateway.LabelsEntry"; + key: string; + value: string; +} + +/** Shared Egress Gateway configuration */ +export interface SharedEgressGateway { + $type: "yandex.cloud.vpc.v1.SharedEgressGateway"; +} + +const baseGateway: object = { + $type: "yandex.cloud.vpc.v1.Gateway", + id: "", + folderId: "", + name: "", + description: "", +}; + +export const Gateway = { + $type: "yandex.cloud.vpc.v1.Gateway" as const, + + encode( + message: Gateway, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Gateway_LabelsEntry.encode( + { + $type: "yandex.cloud.vpc.v1.Gateway.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.sharedEgressGateway !== undefined) { + SharedEgressGateway.encode( + message.sharedEgressGateway, + writer.uint32(58).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Gateway { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGateway } as Gateway; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = Gateway_LabelsEntry.decode(reader, reader.uint32()); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.sharedEgressGateway = SharedEgressGateway.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Gateway { + const message = { ...baseGateway } as Gateway; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.sharedEgressGateway = + object.sharedEgressGateway !== undefined && + object.sharedEgressGateway !== null + ? 
SharedEgressGateway.fromJSON(object.sharedEgressGateway) + : undefined; + return message; + }, + + toJSON(message: Gateway): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.sharedEgressGateway !== undefined && + (obj.sharedEgressGateway = message.sharedEgressGateway + ? SharedEgressGateway.toJSON(message.sharedEgressGateway) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Gateway { + const message = { ...baseGateway } as Gateway; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.sharedEgressGateway = + object.sharedEgressGateway !== undefined && + object.sharedEgressGateway !== null + ? SharedEgressGateway.fromPartial(object.sharedEgressGateway) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Gateway.$type, Gateway); + +const baseGateway_LabelsEntry: object = { + $type: "yandex.cloud.vpc.v1.Gateway.LabelsEntry", + key: "", + value: "", +}; + +export const Gateway_LabelsEntry = { + $type: "yandex.cloud.vpc.v1.Gateway.LabelsEntry" as const, + + encode( + message: Gateway_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Gateway_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGateway_LabelsEntry } as Gateway_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Gateway_LabelsEntry { + const message = { ...baseGateway_LabelsEntry } as Gateway_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Gateway_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Gateway_LabelsEntry { + const message = { ...baseGateway_LabelsEntry } as Gateway_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Gateway_LabelsEntry.$type, Gateway_LabelsEntry); + +const baseSharedEgressGateway: object = { + $type: "yandex.cloud.vpc.v1.SharedEgressGateway", +}; + +export const SharedEgressGateway = { + $type: "yandex.cloud.vpc.v1.SharedEgressGateway" as const, + + encode( + _: SharedEgressGateway, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SharedEgressGateway { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSharedEgressGateway } as SharedEgressGateway; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): SharedEgressGateway { + const message = { ...baseSharedEgressGateway } as SharedEgressGateway; + return message; + }, + + toJSON(_: SharedEgressGateway): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): SharedEgressGateway { + const message = { ...baseSharedEgressGateway } as SharedEgressGateway; + return message; + }, +}; + +messageTypeRegistry.set(SharedEgressGateway.$type, SharedEgressGateway); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/vpc/v1/gateway_service.ts b/src/generated/yandex/cloud/vpc/v1/gateway_service.ts new file mode 100644 index 00000000..e7cac683 --- /dev/null +++ b/src/generated/yandex/cloud/vpc/v1/gateway_service.ts @@ -0,0 +1,1939 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Gateway } from "../../../../yandex/cloud/vpc/v1/gateway"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.vpc.v1"; + +export interface GetGatewayRequest { + $type: "yandex.cloud.vpc.v1.GetGatewayRequest"; + /** + * ID of the Gateway resource to return. 
+ * + * To get Gateway resource ID make a [GatewayService.List] request. + */ + gatewayId: string; +} + +export interface ListGatewaysRequest { + $type: "yandex.cloud.vpc.v1.ListGatewaysRequest"; + /** + * ID of the folder to list gateways in. + * + * To get the folder ID use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than `page_size`, the service returns a [ListGatewaysResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListGatewaysResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters Gateway listed in the response. + * + * The expression must specify: + * 1. The field name. Currently you can use filtering only on [Gateway.name] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example of a filter: `name=my-gateway`. + */ + filter: string; +} + +export interface ListGatewaysResponse { + $type: "yandex.cloud.vpc.v1.ListGatewaysResponse"; + /** List of gateways. */ + gateways: Gateway[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListGatewaysRequest.page_size], use `next_page_token` as the value + * for the [ListGatewaysRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface ListGatewayOperationsRequest { + $type: "yandex.cloud.vpc.v1.ListGatewayOperationsRequest"; + /** + * ID of the gateway to list operations for. + * + * To get a gateway ID make a [GatewayService.List] request. + */ + gatewayId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListGatewayOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListGatewayOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListGatewayOperationsResponse { + $type: "yandex.cloud.vpc.v1.ListGatewayOperationsResponse"; + /** List of operations for the specified gateway. */ + operations: Operation[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListGatewayOperationsRequest.page_size], use `next_page_token` as the value + * for the [ListGatewayOperationsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface SharedEgressGatewaySpec { + $type: "yandex.cloud.vpc.v1.SharedEgressGatewaySpec"; +} + +export interface CreateGatewayRequest { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest"; + /** + * ID of the folder to create a gateway in. 
+ * + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * Name of the gateway. + * The name must be unique within the folder. + */ + name: string; + /** Description of the gateway. */ + description: string; + /** Gateway labels as `key:value` pairs. */ + labels: { [key: string]: string }; + sharedEgressGatewaySpec?: SharedEgressGatewaySpec | undefined; +} + +export interface CreateGatewayRequest_LabelsEntry { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateGatewayMetadata { + $type: "yandex.cloud.vpc.v1.CreateGatewayMetadata"; + /** ID of the gateway that is being created. */ + gatewayId: string; +} + +export interface UpdateGatewayRequest { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest"; + /** + * ID of the gateway to update. + * + * To get the gateway ID make a [GatewayService.List] request. + */ + gatewayId: string; + /** Field mask that specifies which attributes of the Gateway should be updated. */ + updateMask?: FieldMask; + /** + * New name for the gateway. + * The name must be unique within the folder. + */ + name: string; + /** New description of the gateway. */ + description: string; + /** + * Gateway labels as `key:value` pairs. + * + * Existing set of labels is completely replaced by the provided set, so if you just want + * to add or remove a label: + * 1. Get the current set of labels with a [GatewayService.Get] request. + * 2. Add or remove a label in this set. + * 3. Send the new set in this field. + */ + labels: { [key: string]: string }; + sharedEgressGatewaySpec?: SharedEgressGatewaySpec | undefined; +} + +export interface UpdateGatewayRequest_LabelsEntry { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateGatewayMetadata { + $type: "yandex.cloud.vpc.v1.UpdateGatewayMetadata"; + /** ID of the Gateway that is being updated. */ + gatewayId: string; +} + +export interface DeleteGatewayRequest { + $type: "yandex.cloud.vpc.v1.DeleteGatewayRequest"; + /** + * ID of the gateway to delete. + * + * To get a gateway ID make a [GatewayService.List] request. + */ + gatewayId: string; +} + +export interface DeleteGatewayMetadata { + $type: "yandex.cloud.vpc.v1.DeleteGatewayMetadata"; + /** ID of the gateway that is being deleted. */ + gatewayId: string; +} + +export interface MoveGatewayRequest { + $type: "yandex.cloud.vpc.v1.MoveGatewayRequest"; + gatewayId: string; + destinationFolderId: string; +} + +export interface MoveGatewayMetadata { + $type: "yandex.cloud.vpc.v1.MoveGatewayMetadata"; + gatewayId: string; +} + +const baseGetGatewayRequest: object = { + $type: "yandex.cloud.vpc.v1.GetGatewayRequest", + gatewayId: "", +}; + +export const GetGatewayRequest = { + $type: "yandex.cloud.vpc.v1.GetGatewayRequest" as const, + + encode( + message: GetGatewayRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetGatewayRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetGatewayRequest } as GetGatewayRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetGatewayRequest { + const message = { ...baseGetGatewayRequest } as GetGatewayRequest; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + return message; + }, + + toJSON(message: GetGatewayRequest): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetGatewayRequest { + const message = { ...baseGetGatewayRequest } as GetGatewayRequest; + message.gatewayId = object.gatewayId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetGatewayRequest.$type, GetGatewayRequest); + +const baseListGatewaysRequest: object = { + $type: "yandex.cloud.vpc.v1.ListGatewaysRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListGatewaysRequest = { + $type: "yandex.cloud.vpc.v1.ListGatewaysRequest" as const, + + encode( + message: ListGatewaysRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListGatewaysRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListGatewaysRequest } as ListGatewaysRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGatewaysRequest { + const message = { ...baseListGatewaysRequest } as ListGatewaysRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? 
String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListGatewaysRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGatewaysRequest { + const message = { ...baseListGatewaysRequest } as ListGatewaysRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListGatewaysRequest.$type, ListGatewaysRequest); + +const baseListGatewaysResponse: object = { + $type: "yandex.cloud.vpc.v1.ListGatewaysResponse", + nextPageToken: "", +}; + +export const ListGatewaysResponse = { + $type: "yandex.cloud.vpc.v1.ListGatewaysResponse" as const, + + encode( + message: ListGatewaysResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.gateways) { + Gateway.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGatewaysResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListGatewaysResponse } as ListGatewaysResponse; + message.gateways = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gateways.push(Gateway.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGatewaysResponse { + const message = { ...baseListGatewaysResponse } as ListGatewaysResponse; + message.gateways = (object.gateways ?? []).map((e: any) => + Gateway.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGatewaysResponse): unknown { + const obj: any = {}; + if (message.gateways) { + obj.gateways = message.gateways.map((e) => + e ? Gateway.toJSON(e) : undefined + ); + } else { + obj.gateways = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGatewaysResponse { + const message = { ...baseListGatewaysResponse } as ListGatewaysResponse; + message.gateways = + object.gateways?.map((e) => Gateway.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListGatewaysResponse.$type, ListGatewaysResponse); + +const baseListGatewayOperationsRequest: object = { + $type: "yandex.cloud.vpc.v1.ListGatewayOperationsRequest", + gatewayId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListGatewayOperationsRequest = { + $type: "yandex.cloud.vpc.v1.ListGatewayOperationsRequest" as const, + + encode( + message: ListGatewayOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGatewayOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGatewayOperationsRequest, + } as ListGatewayOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGatewayOperationsRequest { + const message = { + ...baseListGatewayOperationsRequest, + } as ListGatewayOperationsRequest; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListGatewayOperationsRequest): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGatewayOperationsRequest { + const message = { + ...baseListGatewayOperationsRequest, + } as ListGatewayOperationsRequest; + message.gatewayId = object.gatewayId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGatewayOperationsRequest.$type, + ListGatewayOperationsRequest +); + +const baseListGatewayOperationsResponse: object = { + $type: "yandex.cloud.vpc.v1.ListGatewayOperationsResponse", + nextPageToken: "", +}; + +export const ListGatewayOperationsResponse = { + $type: "yandex.cloud.vpc.v1.ListGatewayOperationsResponse" as const, + + encode( + message: ListGatewayOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGatewayOperationsResponse { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGatewayOperationsResponse, + } as ListGatewayOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGatewayOperationsResponse { + const message = { + ...baseListGatewayOperationsResponse, + } as ListGatewayOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGatewayOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGatewayOperationsResponse { + const message = { + ...baseListGatewayOperationsResponse, + } as ListGatewayOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGatewayOperationsResponse.$type, + ListGatewayOperationsResponse +); + +const baseSharedEgressGatewaySpec: object = { + $type: "yandex.cloud.vpc.v1.SharedEgressGatewaySpec", +}; + +export const SharedEgressGatewaySpec = { + $type: "yandex.cloud.vpc.v1.SharedEgressGatewaySpec" as const, + + encode( + _: SharedEgressGatewaySpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SharedEgressGatewaySpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseSharedEgressGatewaySpec, + } as SharedEgressGatewaySpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): SharedEgressGatewaySpec { + const message = { + ...baseSharedEgressGatewaySpec, + } as SharedEgressGatewaySpec; + return message; + }, + + toJSON(_: SharedEgressGatewaySpec): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): SharedEgressGatewaySpec { + const message = { + ...baseSharedEgressGatewaySpec, + } as SharedEgressGatewaySpec; + return message; + }, +}; + +messageTypeRegistry.set(SharedEgressGatewaySpec.$type, SharedEgressGatewaySpec); + +const baseCreateGatewayRequest: object = { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest", + folderId: "", + name: "", + description: "", +}; + +export const CreateGatewayRequest = { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest" as const, + + encode( + message: CreateGatewayRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateGatewayRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.sharedEgressGatewaySpec !== undefined) { + SharedEgressGatewaySpec.encode( + message.sharedEgressGatewaySpec, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGatewayRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateGatewayRequest } as CreateGatewayRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateGatewayRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.sharedEgressGatewaySpec = SharedEgressGatewaySpec.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGatewayRequest { + const message = { ...baseCreateGatewayRequest } as CreateGatewayRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.sharedEgressGatewaySpec = + object.sharedEgressGatewaySpec !== undefined && + object.sharedEgressGatewaySpec !== null + ? SharedEgressGatewaySpec.fromJSON(object.sharedEgressGatewaySpec) + : undefined; + return message; + }, + + toJSON(message: CreateGatewayRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.sharedEgressGatewaySpec !== undefined && + (obj.sharedEgressGatewaySpec = message.sharedEgressGatewaySpec + ? SharedEgressGatewaySpec.toJSON(message.sharedEgressGatewaySpec) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGatewayRequest { + const message = { ...baseCreateGatewayRequest } as CreateGatewayRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.sharedEgressGatewaySpec = + object.sharedEgressGatewaySpec !== undefined && + object.sharedEgressGatewaySpec !== null + ? SharedEgressGatewaySpec.fromPartial(object.sharedEgressGatewaySpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateGatewayRequest.$type, CreateGatewayRequest); + +const baseCreateGatewayRequest_LabelsEntry: object = { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateGatewayRequest_LabelsEntry = { + $type: "yandex.cloud.vpc.v1.CreateGatewayRequest.LabelsEntry" as const, + + encode( + message: CreateGatewayRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGatewayRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateGatewayRequest_LabelsEntry, + } as CreateGatewayRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGatewayRequest_LabelsEntry { + const message = { + ...baseCreateGatewayRequest_LabelsEntry, + } as CreateGatewayRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateGatewayRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateGatewayRequest_LabelsEntry { + const message = { + ...baseCreateGatewayRequest_LabelsEntry, + } as CreateGatewayRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateGatewayRequest_LabelsEntry.$type, + CreateGatewayRequest_LabelsEntry +); + +const baseCreateGatewayMetadata: object = { + $type: "yandex.cloud.vpc.v1.CreateGatewayMetadata", + gatewayId: "", +}; + +export const CreateGatewayMetadata = { + $type: "yandex.cloud.vpc.v1.CreateGatewayMetadata" as const, + + encode( + message: CreateGatewayMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateGatewayMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateGatewayMetadata } as CreateGatewayMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGatewayMetadata { + const message = { ...baseCreateGatewayMetadata } as CreateGatewayMetadata; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + return message; + }, + + toJSON(message: CreateGatewayMetadata): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGatewayMetadata { + const message = { ...baseCreateGatewayMetadata } as CreateGatewayMetadata; + message.gatewayId = object.gatewayId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(CreateGatewayMetadata.$type, CreateGatewayMetadata); + +const baseUpdateGatewayRequest: object = { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest", + gatewayId: "", + name: "", + description: "", +}; + +export const UpdateGatewayRequest = { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest" as const, + + encode( + message: UpdateGatewayRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateGatewayRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.sharedEgressGatewaySpec !== undefined) { + SharedEgressGatewaySpec.encode( + message.sharedEgressGatewaySpec, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGatewayRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateGatewayRequest } as UpdateGatewayRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateGatewayRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.sharedEgressGatewaySpec = SharedEgressGatewaySpec.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGatewayRequest { + const message = { ...baseUpdateGatewayRequest } as UpdateGatewayRequest; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.sharedEgressGatewaySpec = + object.sharedEgressGatewaySpec !== undefined && + object.sharedEgressGatewaySpec !== null + ? 
SharedEgressGatewaySpec.fromJSON(object.sharedEgressGatewaySpec) + : undefined; + return message; + }, + + toJSON(message: UpdateGatewayRequest): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.sharedEgressGatewaySpec !== undefined && + (obj.sharedEgressGatewaySpec = message.sharedEgressGatewaySpec + ? SharedEgressGatewaySpec.toJSON(message.sharedEgressGatewaySpec) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGatewayRequest { + const message = { ...baseUpdateGatewayRequest } as UpdateGatewayRequest; + message.gatewayId = object.gatewayId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.sharedEgressGatewaySpec = + object.sharedEgressGatewaySpec !== undefined && + object.sharedEgressGatewaySpec !== null + ? SharedEgressGatewaySpec.fromPartial(object.sharedEgressGatewaySpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateGatewayRequest.$type, UpdateGatewayRequest); + +const baseUpdateGatewayRequest_LabelsEntry: object = { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateGatewayRequest_LabelsEntry = { + $type: "yandex.cloud.vpc.v1.UpdateGatewayRequest.LabelsEntry" as const, + + encode( + message: UpdateGatewayRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGatewayRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateGatewayRequest_LabelsEntry, + } as UpdateGatewayRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGatewayRequest_LabelsEntry { + const message = { + ...baseUpdateGatewayRequest_LabelsEntry, + } as UpdateGatewayRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateGatewayRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateGatewayRequest_LabelsEntry { + const message = { + ...baseUpdateGatewayRequest_LabelsEntry, + } as UpdateGatewayRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGatewayRequest_LabelsEntry.$type, + UpdateGatewayRequest_LabelsEntry +); + +const baseUpdateGatewayMetadata: object = { + $type: "yandex.cloud.vpc.v1.UpdateGatewayMetadata", + gatewayId: "", +}; + +export const UpdateGatewayMetadata = { + $type: "yandex.cloud.vpc.v1.UpdateGatewayMetadata" as const, + + encode( + message: UpdateGatewayMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGatewayMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateGatewayMetadata } as UpdateGatewayMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGatewayMetadata { + const message = { ...baseUpdateGatewayMetadata } as UpdateGatewayMetadata; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + return message; + }, + + toJSON(message: UpdateGatewayMetadata): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGatewayMetadata { + const message = { ...baseUpdateGatewayMetadata } as UpdateGatewayMetadata; + message.gatewayId = object.gatewayId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateGatewayMetadata.$type, UpdateGatewayMetadata); + +const baseDeleteGatewayRequest: object = { + $type: "yandex.cloud.vpc.v1.DeleteGatewayRequest", + gatewayId: "", +}; + +export const DeleteGatewayRequest = { + $type: "yandex.cloud.vpc.v1.DeleteGatewayRequest" as const, + + encode( + message: DeleteGatewayRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteGatewayRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDeleteGatewayRequest } as DeleteGatewayRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGatewayRequest { + const message = { ...baseDeleteGatewayRequest } as DeleteGatewayRequest; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + return message; + }, + + toJSON(message: DeleteGatewayRequest): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGatewayRequest { + const message = { ...baseDeleteGatewayRequest } as DeleteGatewayRequest; + message.gatewayId = object.gatewayId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteGatewayRequest.$type, DeleteGatewayRequest); + +const baseDeleteGatewayMetadata: object = { + $type: "yandex.cloud.vpc.v1.DeleteGatewayMetadata", + gatewayId: "", +}; + +export const DeleteGatewayMetadata = { + $type: "yandex.cloud.vpc.v1.DeleteGatewayMetadata" as const, + + encode( + message: DeleteGatewayMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteGatewayMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteGatewayMetadata } as DeleteGatewayMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGatewayMetadata { + const message = { ...baseDeleteGatewayMetadata } as DeleteGatewayMetadata; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + return message; + }, + + toJSON(message: DeleteGatewayMetadata): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGatewayMetadata { + const message = { ...baseDeleteGatewayMetadata } as DeleteGatewayMetadata; + message.gatewayId = object.gatewayId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteGatewayMetadata.$type, DeleteGatewayMetadata); + +const baseMoveGatewayRequest: object = { + $type: "yandex.cloud.vpc.v1.MoveGatewayRequest", + gatewayId: "", + destinationFolderId: "", +}; + +export const MoveGatewayRequest = { + $type: "yandex.cloud.vpc.v1.MoveGatewayRequest" as const, + + encode( + message: MoveGatewayRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveGatewayRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMoveGatewayRequest } as MoveGatewayRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveGatewayRequest { + const message = { ...baseMoveGatewayRequest } as MoveGatewayRequest; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveGatewayRequest): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveGatewayRequest { + const message = { ...baseMoveGatewayRequest } as MoveGatewayRequest; + message.gatewayId = object.gatewayId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveGatewayRequest.$type, MoveGatewayRequest); + +const baseMoveGatewayMetadata: object = { + $type: "yandex.cloud.vpc.v1.MoveGatewayMetadata", + gatewayId: "", +}; + +export const MoveGatewayMetadata = { + $type: "yandex.cloud.vpc.v1.MoveGatewayMetadata" as const, + + encode( + message: MoveGatewayMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gatewayId !== "") { + writer.uint32(10).string(message.gatewayId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveGatewayMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveGatewayMetadata } as MoveGatewayMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gatewayId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveGatewayMetadata { + const message = { ...baseMoveGatewayMetadata } as MoveGatewayMetadata; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + return message; + }, + + toJSON(message: MoveGatewayMetadata): unknown { + const obj: any = {}; + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveGatewayMetadata { + const message = { ...baseMoveGatewayMetadata } as MoveGatewayMetadata; + message.gatewayId = object.gatewayId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveGatewayMetadata.$type, MoveGatewayMetadata); + +export const GatewayServiceService = { + /** + * Returns the specified Gateway resource. + * + * To get the list of all available Gateway resources, make a [List] request. 
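+ *
+ * Editor's note: an illustrative usage sketch, not part of the generated file.
+ * It assumes the endpoint placeholder and credentials below are valid for your
+ * environment (authentication metadata is omitted for brevity):
+ *
+ *   const client = new GatewayServiceClient(
+ *     "<endpoint-host>:443",
+ *     ChannelCredentials.createSsl()
+ *   );
+ *   client.get(
+ *     GetGatewayRequest.fromPartial({ gatewayId: "<gateway-id>" }),
+ *     (err, gateway) => {
+ *       if (err) throw err;
+ *       console.log(gateway.name);
+ *     }
+ *   );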
+ */ + get: { + path: "/yandex.cloud.vpc.v1.GatewayService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetGatewayRequest) => + Buffer.from(GetGatewayRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetGatewayRequest.decode(value), + responseSerialize: (value: Gateway) => + Buffer.from(Gateway.encode(value).finish()), + responseDeserialize: (value: Buffer) => Gateway.decode(value), + }, + /** Retrieves the list of Gateway resources in the specified folder. */ + list: { + path: "/yandex.cloud.vpc.v1.GatewayService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGatewaysRequest) => + Buffer.from(ListGatewaysRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListGatewaysRequest.decode(value), + responseSerialize: (value: ListGatewaysResponse) => + Buffer.from(ListGatewaysResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListGatewaysResponse.decode(value), + }, + /** Creates a gateway in the specified folder. */ + create: { + path: "/yandex.cloud.vpc.v1.GatewayService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateGatewayRequest) => + Buffer.from(CreateGatewayRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateGatewayRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified gateway. */ + update: { + path: "/yandex.cloud.vpc.v1.GatewayService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateGatewayRequest) => + Buffer.from(UpdateGatewayRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateGatewayRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified gateway. */ + delete: { + path: "/yandex.cloud.vpc.v1.GatewayService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteGatewayRequest) => + Buffer.from(DeleteGatewayRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteGatewayRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** List operations for the specified gateway. 
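+ *
+ * Editor's note: an illustrative paging sketch, not part of the generated file.
+ * It is meant to run inside an async function and assumes a connected
+ * GatewayServiceClient named `client` plus a known gateway ID:
+ *
+ *   let pageToken = "";
+ *   do {
+ *     const page = await new Promise<ListGatewayOperationsResponse>((resolve, reject) =>
+ *       client.listOperations(
+ *         ListGatewayOperationsRequest.fromPartial({ gatewayId: "<gateway-id>", pageToken }),
+ *         (err, res) => (err ? reject(err) : resolve(res))
+ *       )
+ *     );
+ *     page.operations.forEach((op) => console.log(op.id));
+ *     pageToken = page.nextPageToken;
+ *   } while (pageToken !== "");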
*/ + listOperations: { + path: "/yandex.cloud.vpc.v1.GatewayService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGatewayOperationsRequest) => + Buffer.from(ListGatewayOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListGatewayOperationsRequest.decode(value), + responseSerialize: (value: ListGatewayOperationsResponse) => + Buffer.from(ListGatewayOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGatewayOperationsResponse.decode(value), + }, + /** Move a gateway to another folder */ + move: { + path: "/yandex.cloud.vpc.v1.GatewayService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveGatewayRequest) => + Buffer.from(MoveGatewayRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveGatewayRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface GatewayServiceServer extends UntypedServiceImplementation { + /** + * Returns the specified Gateway resource. + * + * To get the list of all available Gateway resources, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of Gateway resources in the specified folder. */ + list: handleUnaryCall; + /** Creates a gateway in the specified folder. */ + create: handleUnaryCall; + /** Updates the specified gateway. */ + update: handleUnaryCall; + /** Deletes the specified gateway. */ + delete: handleUnaryCall; + /** List operations for the specified gateway. */ + listOperations: handleUnaryCall< + ListGatewayOperationsRequest, + ListGatewayOperationsResponse + >; + /** Move a gateway to another folder */ + move: handleUnaryCall; +} + +export interface GatewayServiceClient extends Client { + /** + * Returns the specified Gateway resource. + * + * To get the list of all available Gateway resources, make a [List] request. + */ + get( + request: GetGatewayRequest, + callback: (error: ServiceError | null, response: Gateway) => void + ): ClientUnaryCall; + get( + request: GetGatewayRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Gateway) => void + ): ClientUnaryCall; + get( + request: GetGatewayRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Gateway) => void + ): ClientUnaryCall; + /** Retrieves the list of Gateway resources in the specified folder. */ + list( + request: ListGatewaysRequest, + callback: ( + error: ServiceError | null, + response: ListGatewaysResponse + ) => void + ): ClientUnaryCall; + list( + request: ListGatewaysRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGatewaysResponse + ) => void + ): ClientUnaryCall; + list( + request: ListGatewaysRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGatewaysResponse + ) => void + ): ClientUnaryCall; + /** Creates a gateway in the specified folder. 
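+ *
+ * Editor's note: an illustrative sketch, not part of the generated file. The call
+ * answers with a long-running [Operation] whose metadata is [CreateGatewayMetadata];
+ * the folder ID below is a placeholder and the label set is arbitrary:
+ *
+ *   client.create(
+ *     CreateGatewayRequest.fromPartial({
+ *       folderId: "<folder-id>",
+ *       name: "my-gateway",
+ *       description: "example gateway",
+ *       labels: { env: "dev" },
+ *       sharedEgressGatewaySpec: {},
+ *     }),
+ *     (err, operation) => {
+ *       if (err) throw err;
+ *       console.log(operation.id, operation.done);
+ *     }
+ *   );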
*/ + create( + request: CreateGatewayRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGatewayRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGatewayRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified gateway. */ + update( + request: UpdateGatewayRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGatewayRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGatewayRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified gateway. */ + delete( + request: DeleteGatewayRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGatewayRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGatewayRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** List operations for the specified gateway. */ + listOperations( + request: ListGatewayOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListGatewayOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListGatewayOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGatewayOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListGatewayOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGatewayOperationsResponse + ) => void + ): ClientUnaryCall; + /** Move a gateway to another folder */ + move( + request: MoveGatewayRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveGatewayRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveGatewayRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const GatewayServiceClient = makeGenericClientConstructor( + GatewayServiceService, + "yandex.cloud.vpc.v1.GatewayService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): GatewayServiceClient; + service: typeof GatewayServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? 
Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ydb/v1/backup_service.ts b/src/generated/yandex/cloud/ydb/v1/backup_service.ts index 2d6f0325..800d7fc8 100644 --- a/src/generated/yandex/cloud/ydb/v1/backup_service.ts +++ b/src/generated/yandex/cloud/ydb/v1/backup_service.ts @@ -15,6 +15,12 @@ import { } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; import { Backup } from "../../../../yandex/cloud/ydb/v1/backup"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; import { Operation } from "../../../../yandex/cloud/operation/operation"; export const protobufPackage = "yandex.cloud.ydb.v1"; @@ -666,6 +672,43 @@ export const BackupServiceService = { Buffer.from(ListBackupsResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), }, + listAccessBindings: { + path: "/yandex.cloud.ydb.v1.BackupService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + setAccessBindings: { + path: "/yandex.cloud.ydb.v1.BackupService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + updateAccessBindings: { + path: "/yandex.cloud.ydb.v1.BackupService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified backup. */ delete: { path: "/yandex.cloud.ydb.v1.BackupService/Delete", @@ -686,6 +729,12 @@ export interface BackupServiceServer extends UntypedServiceImplementation { listPaths: handleUnaryCall; /** Retrieves a list of backups. 
*/ list: handleUnaryCall; + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + setAccessBindings: handleUnaryCall; + updateAccessBindings: handleUnaryCall; /** Deletes the specified backup. */ delete: handleUnaryCall; } @@ -747,6 +796,60 @@ export interface BackupServiceClient extends Client { response: ListBackupsResponse ) => void ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Deletes the specified backup. 
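+ *
+ * Editor's note: an illustrative sketch, not part of the generated patch. The
+ * access-binding methods added above use the same unary callback shape as
+ * `delete`; `setAccessBindings` and `updateAccessBindings` answer with a
+ * long-running [Operation], while `listAccessBindings` returns the bindings
+ * directly. The `resourceId` field name comes from yandex.cloud.access and is
+ * assumed here to carry the backup ID:
+ *
+ *   client.listAccessBindings(
+ *     ListAccessBindingsRequest.fromPartial({ resourceId: "<backup-id>" }),
+ *     (err, res) => {
+ *       if (err) throw err;
+ *       console.log(res);
+ *     }
+ *   );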
*/ delete( request: DeleteBackupRequest, diff --git a/src/generated/yandex/cloud/ydb/v1/database_service.ts b/src/generated/yandex/cloud/ydb/v1/database_service.ts index 858230af..496204a3 100644 --- a/src/generated/yandex/cloud/ydb/v1/database_service.ts +++ b/src/generated/yandex/cloud/ydb/v1/database_service.ts @@ -30,6 +30,12 @@ import { } from "../../../../yandex/cloud/ydb/v1/database"; import { FieldMask } from "../../../../google/protobuf/field_mask"; import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; export const protobufPackage = "yandex.cloud.ydb.v1"; @@ -2432,6 +2438,43 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + listAccessBindings: { + path: "/yandex.cloud.ydb.v1.DatabaseService/ListAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListAccessBindingsRequest) => + Buffer.from(ListAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListAccessBindingsRequest.decode(value), + responseSerialize: (value: ListAccessBindingsResponse) => + Buffer.from(ListAccessBindingsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListAccessBindingsResponse.decode(value), + }, + setAccessBindings: { + path: "/yandex.cloud.ydb.v1.DatabaseService/SetAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: SetAccessBindingsRequest) => + Buffer.from(SetAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SetAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + updateAccessBindings: { + path: "/yandex.cloud.ydb.v1.DatabaseService/UpdateAccessBindings", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateAccessBindingsRequest) => + Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateAccessBindingsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified database. */ delete: { path: "/yandex.cloud.ydb.v1.DatabaseService/Delete", @@ -2482,6 +2525,12 @@ export interface DatabaseServiceServer extends UntypedServiceImplementation { start: handleUnaryCall; /** Stops the specified database. */ stop: handleUnaryCall; + listAccessBindings: handleUnaryCall< + ListAccessBindingsRequest, + ListAccessBindingsResponse + >; + setAccessBindings: handleUnaryCall; + updateAccessBindings: handleUnaryCall; /** Deletes the specified database. 
*/ delete: handleUnaryCall; /** Restores the specified backup */ @@ -2595,6 +2644,60 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Deletes the specified database. 
*/ delete( request: DeleteDatabaseRequest, diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 972c4394..2a350615 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -116,6 +116,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.vpc.v1.RouteTableService', 'yandex.cloud.vpc.v1.SecurityGroupService', 'yandex.cloud.vpc.v1.SubnetService', + 'yandex.cloud.vpc.v1.GatewayService', ], endpoint: 'vpc.api.cloud.yandex.net:443', }, @@ -326,6 +327,12 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ ], endpoint: 'organization-manager.api.cloud.yandex.net:443', }, + { + serviceIds: [ + 'yandex.cloud.storage.v1.BucketService', + ], + endpoint: 'storage.yandexcloud.net', + }, ]; export const getServiceClientEndpoint = (generatedClientCtor: GeneratedServiceClientCtor): string => { From cefea2597d4f1ec297760728313f98ed2c93985c Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Mon, 25 Apr 2022 17:28:30 +0300 Subject: [PATCH 22/54] docs: storage example --- examples/package-lock.json | 1275 ++---------------------------------- examples/package.json | 2 +- examples/storage.ts | 18 + src/service-endpoints.ts | 2 +- 4 files changed, 64 insertions(+), 1233 deletions(-) create mode 100644 examples/storage.ts diff --git a/examples/package-lock.json b/examples/package-lock.json index 5390f16d..2e20b064 100644 --- a/examples/package-lock.json +++ b/examples/package-lock.json @@ -9,7 +9,7 @@ "version": "1.0.0", "license": "ISC", "dependencies": { - "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.2", + "@yandex-cloud/nodejs-sdk": "../", "wav": "^1.0.2" }, "devDependencies": { @@ -19,7 +19,6 @@ "..": { "name": "@yandex-cloud/nodejs-sdk", "version": "2.0.0-alpha.4", - "extraneous": true, "license": "MIT", "dependencies": { "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", @@ -66,105 +65,11 @@ "node": ">=10.0.0" } }, - "node_modules/@grpc/grpc-js": { - "version": "1.5.5", - "resolved": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/proto-loader": "^0.6.4", - "@types/node": ">=12.12.47" - }, - "engines": { - "node": "^8.13.0 || >=10.10.0" - } - }, - "node_modules/@grpc/proto-loader": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.9.tgz", - "integrity": "sha512-UlcCS8VbsU9d3XTXGiEVFonN7hXk+oMXZtoHHG2oSA1/GcDP1q6OUgs20PzHDGizzyi8ufGSUDlk3O2NyY7leg==", - "dependencies": { - "@types/long": "^4.0.1", - "lodash.camelcase": "^4.3.0", - "long": "^4.0.0", - "protobufjs": "^6.10.0", - "yargs": "^16.2.0" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@grpc/proto-loader/node_modules/long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, - "node_modules/@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" - }, - "node_modules/@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - 
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" - }, - "node_modules/@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" - }, - "node_modules/@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" - }, - "node_modules/@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", - "dependencies": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "node_modules/@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" - }, - "node_modules/@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" - }, - "node_modules/@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" - }, - "node_modules/@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" - }, - "node_modules/@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" - }, - "node_modules/@types/long": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", - "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" - }, "node_modules/@types/node": { "version": "17.0.10", "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz", - "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==" + "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==", + "dev": true }, "node_modules/@types/wav": { "version": "1.0.1", @@ -176,98 +81,8 @@ } }, "node_modules/@yandex-cloud/nodejs-sdk": { - "version": "2.0.0-beta.2", - "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.2.tgz", - "integrity": "sha512-38malOtD1lu8CWEb3CwwBEpaicK8g9GADp7Yo8YVnhtgZjkx+4aWfz7kuLtC8Cz5lUF8woXwCmHdmVweLO81Rg==", - "dependencies": { - "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "axios": "0.24.0", - "jsonwebtoken": "8.5.1", - "lodash": "4.17.21", - "log4js": "6.3.0", - "long": "5.2.0", - "luxon": "2.2.0", - "nice-grpc": "1.0.6", - "nice-grpc-client-middleware-deadline": "1.0.6", - "protobufjs": "6.8.8", - "utility-types": "3.10.0" - }, - "engines": { - "node": ">=10.0.0" - } - }, - "node_modules/@yandex-cloud/nodejs-sdk/node_modules/@types/node": { - "version": "10.17.60", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", - "integrity": 
"sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" - }, - "node_modules/@yandex-cloud/nodejs-sdk/node_modules/protobufjs": { - "version": "6.8.8", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz", - "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==", - "hasInstallScript": true, - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.0", - "@types/node": "^10.1.0", - "long": "^4.0.0" - }, - "bin": { - "pbjs": "bin/pbjs", - "pbts": "bin/pbts" - } - }, - "node_modules/@yandex-cloud/nodejs-sdk/node_modules/protobufjs/node_modules/long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, - "node_modules/abort-controller-x": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.2.6.tgz", - "integrity": "sha512-U8MmmcfIzl7qnzoog1woxKX/eYkQin3WR7k/S2dtpGLlSlsndXnvOYQEq8y1VnHC3+ofNFAT0GRgHq1lBbXlDQ==", - "dependencies": { - "node-abort-controller": "^1.2.1 || ^2.0.0" - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/axios": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.24.0.tgz", - "integrity": "sha512-Q6cWsys88HoPgAaFAVUb0WpPk0O8iTeisR9IMqy9G8AbO4NlpVknrnQS03zzF9PGAWgO3cgletO3VjV/P7VztA==", - "dependencies": { - "follow-redirects": "^1.14.4" - } + "resolved": "..", + "link": true }, "node_modules/buffer-alloc": { "version": "1.2.0", @@ -283,11 +98,6 @@ "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" }, - "node_modules/buffer-equal-constant-time": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" - }, "node_modules/buffer-fill": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", @@ -298,354 +108,21 @@ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": 
"https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, "node_modules/core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, - "node_modules/date-format": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz", - "integrity": "sha512-eyTcpKOcamdhWJXj56DpQMo1ylSQpcGtGKXcU0Tb97+K56/CF5amAqqqNj0+KvA0iw2ynxtHWFsPDSClCxe48w==", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/debug/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/ecdsa-sig-formatter": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", - "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "dependencies": { - "safe-buffer": "^5.0.1" - } - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "engines": { - "node": ">=6" - } - }, - "node_modules/flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" - }, - "node_modules/follow-redirects": { - "version": "1.14.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", - "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - 
"peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - }, - "engines": { - "node": ">=6 <7 || >=8" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.9", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", - "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" - }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, "node_modules/isarray": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" }, - "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonwebtoken": { - "version": "8.5.1", - "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz", - "integrity": "sha512-XjwVfRS6jTMsqYs0EsuJ4LGxXV14zQybNd4L2r0UvbVnSF9Af8x7p5MzbJ90Ioz/9TI41/hTCvznF/loiSzn8w==", - "dependencies": { - "jws": "^3.2.2", - "lodash.includes": "^4.3.0", - "lodash.isboolean": "^3.0.3", - "lodash.isinteger": "^4.0.4", - "lodash.isnumber": "^3.0.3", - "lodash.isplainobject": "^4.0.6", - "lodash.isstring": "^4.0.1", - "lodash.once": "^4.0.0", - "ms": "^2.1.1", - "semver": "^5.6.0" - }, - "engines": { - "node": ">=4", - "npm": ">=1.4.28" - } - }, - "node_modules/jwa": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", - "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", - "dependencies": { - "buffer-equal-constant-time": "1.0.1", - "ecdsa-sig-formatter": "1.0.11", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/jws": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", - "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", - "dependencies": { - "jwa": "^1.4.1", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "node_modules/lodash.camelcase": { - 
"version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" - }, - "node_modules/lodash.includes": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", - "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=" - }, - "node_modules/lodash.isboolean": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", - "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=" - }, - "node_modules/lodash.isinteger": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", - "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=" - }, - "node_modules/lodash.isnumber": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", - "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=" - }, - "node_modules/lodash.isplainobject": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", - "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" - }, - "node_modules/lodash.isstring": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", - "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" - }, - "node_modules/lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=" - }, - "node_modules/log4js": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.3.0.tgz", - "integrity": "sha512-Mc8jNuSFImQUIateBFwdOQcmC6Q5maU0VVvdC2R6XMb66/VnT+7WS4D/0EeNMZu1YODmJe5NIn2XftCzEocUgw==", - "dependencies": { - "date-format": "^3.0.0", - "debug": "^4.1.1", - "flatted": "^2.0.1", - "rfdc": "^1.1.4", - "streamroller": "^2.2.4" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/long": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/long/-/long-5.2.0.tgz", - "integrity": "sha512-9RTUNjK60eJbx3uz+TEGF7fUr29ZDxR5QzXcyDpeSfeH28S9ycINflOgOlppit5U+4kNTe83KQnMEerw7GmE8w==" - }, - "node_modules/luxon": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.2.0.tgz", - "integrity": "sha512-LwmknessH4jVIseCsizUgveIHwlLv/RQZWC2uDSMfGJs7w8faPUi2JFxfyfMcTPrpNbChTem3Uz6IKRtn+LcIA==", - "engines": { - "node": ">=12" - } - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/nice-grpc": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.6.tgz", - "integrity": "sha512-cRImN+SpnPaTAqWSbuU5ixq/wo1Jr1QOv0IZjmcb40XNU0og4JEyt7VCtTM7SAbeLAjdFxd65wiIid696kVTJA==", - "dependencies": { - "@grpc/grpc-js": "^1.5.1", - "abort-controller-x": "^0.2.4", - "nice-grpc-common": "^1.0.4", - "node-abort-controller": "^1.2.1" - } - }, - "node_modules/nice-grpc-client-middleware-deadline": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.6.tgz", - "integrity": "sha512-AokugSveg+2IPohuLbGR5OITgh3W4yZvAmLhuqistjwSRLchzQI4CwQEL1Tj4R0wscreSFoiHkXyG4qtKygOug==", - "dependencies": { - "nice-grpc-common": "^1.0.4", - "node-abort-controller": "^2.0.0" - } - 
}, - "node_modules/nice-grpc-client-middleware-deadline/node_modules/node-abort-controller": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", - "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" - }, - "node_modules/nice-grpc-common": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.4.tgz", - "integrity": "sha512-cpKGONNYqi2XP+5z4B4bzhLNrJu5lPbIScM0sqsht6sG9TgdN7ws3qCH82Fht94CfOifL6pQlvkgnEJp5nl2cQ==", - "dependencies": { - "node-abort-controller": "^2.0.0" - } - }, - "node_modules/nice-grpc-common/node_modules/node-abort-controller": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", - "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" - }, - "node_modules/node-abort-controller": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-1.2.1.tgz", - "integrity": "sha512-79PYeJuj6S9+yOHirR0JBLFOgjB6sQCir10uN6xRx25iD+ZD4ULqgRn3MwWBRaQGB0vEgReJzWwJo42T1R6YbQ==" - }, - "node_modules/protobufjs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", - "hasInstallScript": true, - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", - "@types/node": ">=13.7.0", - "long": "^4.0.0" - }, - "bin": { - "pbjs": "bin/pbjs", - "pbts": "bin/pbts" - } - }, - "node_modules/protobufjs/node_modules/long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, "node_modules/readable-stream": { "version": "1.1.14", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", @@ -657,46 +134,6 @@ "string_decoder": "~0.10.x" } }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/rfdc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.0.tgz", - "integrity": "sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA==" - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/semver": { - "version": "5.7.1", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "bin": { - "semver": "bin/semver" - } - }, "node_modules/stream-parser": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz", @@ -718,72 +155,11 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, - "node_modules/streamroller": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz", - "integrity": "sha512-OG79qm3AujAM9ImoqgWEY1xG4HX+Lw+yY6qZj9R1K2mhF5bEmQ849wvrb+4vt4jLMLzwXttJlQbOdPOQVRv7DQ==", - "dependencies": { - "date-format": "^2.1.0", - "debug": "^4.1.1", - "fs-extra": "^8.1.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/streamroller/node_modules/date-format": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-2.1.0.tgz", - "integrity": "sha512-bYQuGLeFxhkxNOF3rcMtiZxvCBAquGzZm6oWA1oZ0g2THUzivaRhv8uOhdr19LmoobSOLoIAxeUK2RdbM8IFTA==", - "engines": { - "node": ">=4.0" - } - }, "node_modules/string_decoder": { "version": "0.10.31", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/utility-types": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", - "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==", - "engines": { - "node": ">= 4" - } - }, "node_modules/wav": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wav/-/wav-1.0.2.tgz", @@ -808,148 +184,14 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", 
- "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "engines": { - "node": ">=10" - } } }, "dependencies": { - "@grpc/grpc-js": { - "version": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "integrity": "sha512-GrWZNWxbvWNKOWphkYZGiilHZvRHFzOyrSNTc52ZckQDVRG17obLd8L/eZPLZA+4OqHWLqiPSgA7cK3jqVKhkA==", - "requires": { - "@grpc/proto-loader": "^0.6.4", - "@types/node": ">=12.12.47" - } - }, - "@grpc/proto-loader": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.9.tgz", - "integrity": "sha512-UlcCS8VbsU9d3XTXGiEVFonN7hXk+oMXZtoHHG2oSA1/GcDP1q6OUgs20PzHDGizzyi8ufGSUDlk3O2NyY7leg==", - "requires": { - "@types/long": "^4.0.1", - "lodash.camelcase": "^4.3.0", - "long": "^4.0.0", - "protobufjs": "^6.10.0", - "yargs": "^16.2.0" - }, - "dependencies": { - "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - } - } - }, - "@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" - }, - "@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" - }, - "@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" - }, - "@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" - }, - "@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", - "requires": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" - }, - "@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" - }, - "@protobufjs/path": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" - }, - "@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" - }, - "@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" - }, - "@types/long": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", - "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" - }, "@types/node": { "version": "17.0.10", "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz", - "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==" + "integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog==", + "dev": true }, "@types/wav": { "version": "1.0.1", @@ -961,84 +203,45 @@ } }, "@yandex-cloud/nodejs-sdk": { - "version": "2.0.0-beta.2", - "resolved": "https://registry.npmjs.org/@yandex-cloud/nodejs-sdk/-/nodejs-sdk-2.0.0-beta.2.tgz", - "integrity": "sha512-38malOtD1lu8CWEb3CwwBEpaicK8g9GADp7Yo8YVnhtgZjkx+4aWfz7kuLtC8Cz5lUF8woXwCmHdmVweLO81Rg==", + "version": "file:..", "requires": { + "@commitlint/cli": "^15.0.0", + "@commitlint/config-conventional": "^15.0.0", "@grpc/grpc-js": "https://gitpkg.now.sh/DavyJohnes/grpc-node/packages/grpc-js?fix-class-options-issue-with-dist", - "axios": "0.24.0", - "jsonwebtoken": "8.5.1", - "lodash": "4.17.21", - "log4js": "6.3.0", - "long": "5.2.0", - "luxon": "2.2.0", - "nice-grpc": "1.0.6", - "nice-grpc-client-middleware-deadline": "1.0.6", - "protobufjs": "6.8.8", - "utility-types": "3.10.0" - }, - "dependencies": { - "@types/node": { - "version": "10.17.60", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", - "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" - }, - "protobufjs": { - "version": "6.8.8", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz", - "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==", - "requires": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.0", - "@types/node": "^10.1.0", - "long": "^4.0.0" - }, - "dependencies": { - "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - } - } - } - } - }, - "abort-controller-x": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.2.6.tgz", - "integrity": "sha512-U8MmmcfIzl7qnzoog1woxKX/eYkQin3WR7k/S2dtpGLlSlsndXnvOYQEq8y1VnHC3+ofNFAT0GRgHq1lBbXlDQ==", - "requires": { - "node-abort-controller": "^1.2.1 || ^2.0.0" - } - }, - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "axios": { - "version": "0.24.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.24.0.tgz", - "integrity": "sha512-Q6cWsys88HoPgAaFAVUb0WpPk0O8iTeisR9IMqy9G8AbO4NlpVknrnQS03zzF9PGAWgO3cgletO3VjV/P7VztA==", - "requires": { - "follow-redirects": "^1.14.4" + "@semantic-release/git": "^10.0.1", + "@types/jest": "^27.0.3", + "@types/jsonwebtoken": "^8.5.6", + "@types/lodash": "^4.14.178", + "@types/luxon": "^2.0.8", + "@types/node": "^16.11.3", + "@typescript-eslint/eslint-plugin": "^5.7.0", + "@typescript-eslint/parser": "^5.7.0", + "axios": "^0.24.0", + "eslint": "^8.4.1", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^16.1.0", + "eslint-plugin-import": "^2.25.3", + "eslint-plugin-jsx-a11y": "^6.5.1", + "eslint-plugin-prefer-arrow-functions": "^3.1.4", + "eslint-plugin-unicorn": "^39.0.0", + "fast-glob": "^3.2.7", + "grpc-tools": "^1.11.2", + "husky": "^7.0.4", + "jest": "^27.4.5", + "jsonwebtoken": "^8.5.1", + "lodash": "^4.17.21", + "log4js": "^6.3.0", + "long": "^5.2.0", + "luxon": "^2.2.0", + "nice-grpc": "^1.0.4", + "nice-grpc-client-middleware-deadline": "^1.0.4", + "protobufjs": "^6.8.8", + "semantic-release": "^18.0.1", + "ts-jest": "^27.1.1", + "ts-node": "^10.4.0", + "ts-proto": "^1.95.1", + "typescript": "^4.5.4", + "utility-types": "^3.10.0" } }, "buffer-alloc": { @@ -1055,11 +258,6 @@ "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" }, - "buffer-equal-constant-time": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" - }, "buffer-fill": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", @@ -1070,307 +268,21 @@ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" }, - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, "core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": 
"sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, - "date-format": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-3.0.0.tgz", - "integrity": "sha512-eyTcpKOcamdhWJXj56DpQMo1ylSQpcGtGKXcU0Tb97+K56/CF5amAqqqNj0+KvA0iw2ynxtHWFsPDSClCxe48w==" - }, - "debug": { - "version": "4.3.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.3.tgz", - "integrity": "sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q==", - "requires": { - "ms": "2.1.2" - }, - "dependencies": { - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - } - } - }, - "ecdsa-sig-formatter": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", - "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" - }, - "flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" - }, - "follow-redirects": { - "version": "1.14.9", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz", - "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==" - }, - "fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" - }, - "graceful-fs": { - "version": "4.2.9", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz", - "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==" - }, "inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, "isarray": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", "integrity": 
"sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=" }, - "jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "requires": { - "graceful-fs": "^4.1.6" - } - }, - "jsonwebtoken": { - "version": "8.5.1", - "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz", - "integrity": "sha512-XjwVfRS6jTMsqYs0EsuJ4LGxXV14zQybNd4L2r0UvbVnSF9Af8x7p5MzbJ90Ioz/9TI41/hTCvznF/loiSzn8w==", - "requires": { - "jws": "^3.2.2", - "lodash.includes": "^4.3.0", - "lodash.isboolean": "^3.0.3", - "lodash.isinteger": "^4.0.4", - "lodash.isnumber": "^3.0.3", - "lodash.isplainobject": "^4.0.6", - "lodash.isstring": "^4.0.1", - "lodash.once": "^4.0.0", - "ms": "^2.1.1", - "semver": "^5.6.0" - } - }, - "jwa": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", - "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", - "requires": { - "buffer-equal-constant-time": "1.0.1", - "ecdsa-sig-formatter": "1.0.11", - "safe-buffer": "^5.0.1" - } - }, - "jws": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", - "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", - "requires": { - "jwa": "^1.4.1", - "safe-buffer": "^5.0.1" - } - }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" - }, - "lodash.camelcase": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" - }, - "lodash.includes": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", - "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=" - }, - "lodash.isboolean": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", - "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=" - }, - "lodash.isinteger": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", - "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=" - }, - "lodash.isnumber": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", - "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=" - }, - "lodash.isplainobject": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", - "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" - }, - "lodash.isstring": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", - "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" - }, - "lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=" - }, - "log4js": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/log4js/-/log4js-6.3.0.tgz", - "integrity": "sha512-Mc8jNuSFImQUIateBFwdOQcmC6Q5maU0VVvdC2R6XMb66/VnT+7WS4D/0EeNMZu1YODmJe5NIn2XftCzEocUgw==", - "requires": { - "date-format": "^3.0.0", - "debug": "^4.1.1", - "flatted": "^2.0.1", - "rfdc": "^1.1.4", - "streamroller": "^2.2.4" - } - }, - "long": { - "version": 
"5.2.0", - "resolved": "https://registry.npmjs.org/long/-/long-5.2.0.tgz", - "integrity": "sha512-9RTUNjK60eJbx3uz+TEGF7fUr29ZDxR5QzXcyDpeSfeH28S9ycINflOgOlppit5U+4kNTe83KQnMEerw7GmE8w==" - }, - "luxon": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.2.0.tgz", - "integrity": "sha512-LwmknessH4jVIseCsizUgveIHwlLv/RQZWC2uDSMfGJs7w8faPUi2JFxfyfMcTPrpNbChTem3Uz6IKRtn+LcIA==" - }, - "ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "nice-grpc": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/nice-grpc/-/nice-grpc-1.0.6.tgz", - "integrity": "sha512-cRImN+SpnPaTAqWSbuU5ixq/wo1Jr1QOv0IZjmcb40XNU0og4JEyt7VCtTM7SAbeLAjdFxd65wiIid696kVTJA==", - "requires": { - "@grpc/grpc-js": "^1.5.1", - "abort-controller-x": "^0.2.4", - "nice-grpc-common": "^1.0.4", - "node-abort-controller": "^1.2.1" - } - }, - "nice-grpc-client-middleware-deadline": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-deadline/-/nice-grpc-client-middleware-deadline-1.0.6.tgz", - "integrity": "sha512-AokugSveg+2IPohuLbGR5OITgh3W4yZvAmLhuqistjwSRLchzQI4CwQEL1Tj4R0wscreSFoiHkXyG4qtKygOug==", - "requires": { - "nice-grpc-common": "^1.0.4", - "node-abort-controller": "^2.0.0" - }, - "dependencies": { - "node-abort-controller": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", - "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" - } - } - }, - "nice-grpc-common": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.4.tgz", - "integrity": "sha512-cpKGONNYqi2XP+5z4B4bzhLNrJu5lPbIScM0sqsht6sG9TgdN7ws3qCH82Fht94CfOifL6pQlvkgnEJp5nl2cQ==", - "requires": { - "node-abort-controller": "^2.0.0" - }, - "dependencies": { - "node-abort-controller": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", - "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" - } - } - }, - "node-abort-controller": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-1.2.1.tgz", - "integrity": "sha512-79PYeJuj6S9+yOHirR0JBLFOgjB6sQCir10uN6xRx25iD+ZD4ULqgRn3MwWBRaQGB0vEgReJzWwJo42T1R6YbQ==" - }, - "protobufjs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", - "requires": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", - "@types/node": ">=13.7.0", - "long": "^4.0.0" - }, - "dependencies": { - "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - } - } - }, "readable-stream": { "version": "1.1.14", "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", @@ -1382,26 +294,6 @@ "string_decoder": "~0.10.x" } }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" - }, - "rfdc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.0.tgz", - "integrity": "sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA==" - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, "stream-parser": { "version": "0.3.1", "resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz", @@ -1425,56 +317,11 @@ } } }, - "streamroller": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-2.2.4.tgz", - "integrity": "sha512-OG79qm3AujAM9ImoqgWEY1xG4HX+Lw+yY6qZj9R1K2mhF5bEmQ849wvrb+4vt4jLMLzwXttJlQbOdPOQVRv7DQ==", - "requires": { - "date-format": "^2.1.0", - "debug": "^4.1.1", - "fs-extra": "^8.1.0" - }, - "dependencies": { - "date-format": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/date-format/-/date-format-2.1.0.tgz", - "integrity": "sha512-bYQuGLeFxhkxNOF3rcMtiZxvCBAquGzZm6oWA1oZ0g2THUzivaRhv8uOhdr19LmoobSOLoIAxeUK2RdbM8IFTA==" - } - } - }, "string_decoder": { "version": "0.10.31", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" - }, - "utility-types": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz", - "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==" - }, "wav": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wav/-/wav-1.0.2.tgz", @@ -1501,40 +348,6 @@ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" } } - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" 
- } - }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" - }, - "yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - } - }, - "yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==" } } } diff --git a/examples/package.json b/examples/package.json index 92b6dbbb..7ebac36c 100644 --- a/examples/package.json +++ b/examples/package.json @@ -10,7 +10,7 @@ "author": "", "license": "ISC", "dependencies": { - "@yandex-cloud/nodejs-sdk": "^2.0.0-beta.2", + "@yandex-cloud/nodejs-sdk": "../", "wav": "^1.0.2" }, "devDependencies": { diff --git a/examples/storage.ts b/examples/storage.ts new file mode 100644 index 00000000..ea28eeb2 --- /dev/null +++ b/examples/storage.ts @@ -0,0 +1,18 @@ +import { serviceClients, Session, cloudApi } from '@yandex-cloud/nodejs-sdk'; +import { getEnv } from './utils/get-env'; +import { log } from './utils/logger'; + +const { storage: { bucket_service: { ListBucketsRequest } } } = cloudApi; +const AUTH_TOKEN = getEnv('YC_OAUTH_TOKEN'); +const FOLDER_ID = getEnv('YC_FOLDER_ID'); + +(async () => { + const session = new Session({ oauthToken: AUTH_TOKEN }); + const client = session.client(serviceClients.BucketServiceClient); + + const response = await client.list(ListBucketsRequest.fromPartial({ folderId: FOLDER_ID })); + + for (const bucket of response.buckets) { + log(`Bucket: ${bucket.name}, id: ${bucket.id}`); + } +})(); diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 2a350615..d9a57fbf 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -331,7 +331,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ serviceIds: [ 'yandex.cloud.storage.v1.BucketService', ], - endpoint: 'storage.yandexcloud.net', + endpoint: 'storage.api.cloud.yandex.net:443', }, ]; From b0441bbaae256ae94a0b85c8fed2ba4acf685a22 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Mon, 25 Apr 2022 14:43:06 +0000 Subject: [PATCH 23/54] chore(release): 2.1.0 [skip ci] # [2.1.0](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.0.1...v2.1.0) (2022-04-25) ### Features * update code according to latest proto specs ([e9fdac6](https://github.com/yandex-cloud/nodejs-sdk/commit/e9fdac62a3ff2998c406d081d02d124ff36df632)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 9e336c0f..32208d04 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.1", + "version": "2.1.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.1", + "version": "2.1.0", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index cedc2b95..79f9820f 100644 --- a/package.json +++ b/package.json @@ -1,6 
+1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.0.1", + "version": "2.1.0", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 0cba421f0c965b6b7f14f6d6c7477bca8eb3c0eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Jun 2022 21:39:02 +0000 Subject: [PATCH 24/54] chore(deps): bump protobufjs from 6.8.8 to 6.11.3 Bumps [protobufjs](https://github.com/protobufjs/protobuf.js) from 6.8.8 to 6.11.3. - [Release notes](https://github.com/protobufjs/protobuf.js/releases) - [Changelog](https://github.com/protobufjs/protobuf.js/blob/v6.11.3/CHANGELOG.md) - [Commits](https://github.com/protobufjs/protobuf.js/compare/6.8.8...v6.11.3) --- updated-dependencies: - dependency-name: protobufjs dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- package-lock.json | 77 +++++++---------------------------------------- package.json | 2 +- 2 files changed, 12 insertions(+), 67 deletions(-) diff --git a/package-lock.json b/package-lock.json index 32208d04..8df454a7 100644 --- a/package-lock.json +++ b/package-lock.json @@ -18,7 +18,7 @@ "luxon": "2.2.0", "nice-grpc": "1.0.6", "nice-grpc-client-middleware-deadline": "1.0.6", - "protobufjs": "6.8.8", + "protobufjs": "6.11.3", "utility-types": "3.10.0" }, "devDependencies": { @@ -1153,31 +1153,6 @@ "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, - "node_modules/@grpc/proto-loader/node_modules/protobufjs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", - "hasInstallScript": true, - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", - "@types/node": ">=13.7.0", - "long": "^4.0.0" - }, - "bin": { - "pbjs": "bin/pbjs", - "pbts": "bin/pbts" - } - }, "node_modules/@grpc/proto-loader/node_modules/yargs": { "version": "16.2.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", @@ -10907,9 +10882,9 @@ } }, "node_modules/protobufjs": { - "version": "6.8.8", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz", - "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==", + "version": "6.11.3", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.3.tgz", + "integrity": "sha512-xL96WDdCZYdU7Slin569tFX712BxsxslWwAfAhCYjQKGTq7dAU91Lomy6nLLhh/dyGhk/YH4TwTSRxTzhuHyZg==", "hasInstallScript": true, "dependencies": { "@protobufjs/aspromise": "^1.1.2", @@ -10922,8 +10897,8 @@ "@protobufjs/path": "^1.1.2", "@protobufjs/pool": "^1.1.0", "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.0", - "@types/node": "^10.1.0", + "@types/long": "^4.0.1", + "@types/node": ">=13.7.0", "long": "^4.0.0" }, "bin": { @@ -10931,11 +10906,6 @@ "pbts": "bin/pbts" } }, - "node_modules/protobufjs/node_modules/@types/node": { - "version": "10.17.60", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", - "integrity": 
"sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" - }, "node_modules/protobufjs/node_modules/long": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", @@ -13548,26 +13518,6 @@ "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, - "protobufjs": { - "version": "6.11.2", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.2.tgz", - "integrity": "sha512-4BQJoPooKJl2G9j3XftkIXjoC9C0Av2NOrWmbLWT1vH32GcSUHjM0Arra6UfTsVyfMAuFzaLucXn1sadxJydAw==", - "requires": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.1", - "@types/node": ">=13.7.0", - "long": "^4.0.0" - } - }, "yargs": { "version": "16.2.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", @@ -20957,9 +20907,9 @@ } }, "protobufjs": { - "version": "6.8.8", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz", - "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==", + "version": "6.11.3", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.3.tgz", + "integrity": "sha512-xL96WDdCZYdU7Slin569tFX712BxsxslWwAfAhCYjQKGTq7dAU91Lomy6nLLhh/dyGhk/YH4TwTSRxTzhuHyZg==", "requires": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -20971,16 +20921,11 @@ "@protobufjs/path": "^1.1.2", "@protobufjs/pool": "^1.1.0", "@protobufjs/utf8": "^1.1.0", - "@types/long": "^4.0.0", - "@types/node": "^10.1.0", + "@types/long": "^4.0.1", + "@types/node": ">=13.7.0", "long": "^4.0.0" }, "dependencies": { - "@types/node": { - "version": "10.17.60", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", - "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" - }, "long": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", diff --git a/package.json b/package.json index 79f9820f..c926f739 100644 --- a/package.json +++ b/package.json @@ -31,7 +31,7 @@ "luxon": "2.2.0", "nice-grpc": "1.0.6", "nice-grpc-client-middleware-deadline": "1.0.6", - "protobufjs": "6.8.8", + "protobufjs": "6.11.3", "utility-types": "3.10.0" }, "devDependencies": { From 201309063bb58e40aa730ea18467d5802be19e59 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 3 Jun 2022 17:00:59 +0300 Subject: [PATCH 25/54] fix: minor changes in order to force new release --- README.md | 2 +- package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e9acd7be..6f2d1f28 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Need to automate your infrastructure or use services provided by Yandex.Cloud? We've got you covered. 
## Requirements -- nodejs >= 10 +- nodejs >= 12 ## Installation `npm install @yandex-cloud/nodejs-sdk` diff --git a/package.json b/package.json index c926f739..20d821c7 100644 --- a/package.json +++ b/package.json @@ -71,7 +71,7 @@ "prepublishOnly": "npm run build" }, "engines": { - "node": ">=10.0.0" + "node": ">=12.0.0" }, "publishConfig": { "access": "public" From bcafee6e66b2e6bd7dfc12d7587d029eeecb4967 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Fri, 3 Jun 2022 14:09:25 +0000 Subject: [PATCH 26/54] chore(release): 2.1.1 [skip ci] ## [2.1.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.1.0...v2.1.1) (2022-06-03) ### Bug Fixes * minor changes in order to force new release ([2013090](https://github.com/yandex-cloud/nodejs-sdk/commit/201309063bb58e40aa730ea18467d5802be19e59)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 8df454a7..9ade15de 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.1.0", + "version": "2.1.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.1.0", + "version": "2.1.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index 20d821c7..5b4cee42 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.1.0", + "version": "2.1.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 01d8003821b7742a52d7b4be73e2770ebe76e5fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Jun 2022 00:06:15 +0000 Subject: [PATCH 27/54] chore(deps): bump semver-regex from 3.1.3 to 3.1.4 Bumps [semver-regex](https://github.com/sindresorhus/semver-regex) from 3.1.3 to 3.1.4. - [Release notes](https://github.com/sindresorhus/semver-regex/releases) - [Commits](https://github.com/sindresorhus/semver-regex/commits/v3.1.4) --- updated-dependencies: - dependency-name: semver-regex dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/package-lock.json b/package-lock.json index 9ade15de..2fd7273b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -50,7 +50,7 @@ "typescript": "4.5.4" }, "engines": { - "node": ">=10.0.0" + "node": ">=12.0.0" } }, "node_modules/@babel/code-frame": { @@ -11400,9 +11400,9 @@ } }, "node_modules/semver-regex": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-3.1.3.tgz", - "integrity": "sha512-Aqi54Mk9uYTjVexLnR67rTyBusmwd04cLkHy9hNvk3+G3nT2Oyg7E0l4XVbOaNwIvQ3hHeYxGcyEy+mKreyBFQ==", + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-3.1.4.tgz", + "integrity": "sha512-6IiqeZNgq01qGf0TId0t3NvKzSvUsjcpdEO3AQNeIjR6A2+ckTnQlDpl4qu1bjRv0RzN3FP9hzFmws3lKqRWkA==", "dev": true, "engines": { "node": ">=8" @@ -21283,9 +21283,9 @@ } }, "semver-regex": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-3.1.3.tgz", - "integrity": "sha512-Aqi54Mk9uYTjVexLnR67rTyBusmwd04cLkHy9hNvk3+G3nT2Oyg7E0l4XVbOaNwIvQ3hHeYxGcyEy+mKreyBFQ==", + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-3.1.4.tgz", + "integrity": "sha512-6IiqeZNgq01qGf0TId0t3NvKzSvUsjcpdEO3AQNeIjR6A2+ckTnQlDpl4qu1bjRv0RzN3FP9hzFmws3lKqRWkA==", "dev": true }, "set-blocking": { From 9961f2fb30537e19e282d3ada571e99a7e21d8c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jun 2022 01:03:54 +0000 Subject: [PATCH 28/54] chore(deps-dev): bump semantic-release from 18.0.1 to 19.0.3 Bumps [semantic-release](https://github.com/semantic-release/semantic-release) from 18.0.1 to 19.0.3. - [Release notes](https://github.com/semantic-release/semantic-release/releases) - [Commits](https://github.com/semantic-release/semantic-release/compare/v18.0.1...v19.0.3) --- updated-dependencies: - dependency-name: semantic-release dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 3034 +++++++++++++++++---------------------------- package.json | 2 +- 2 files changed, 1113 insertions(+), 1923 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2fd7273b..3ce12742 100644 --- a/package-lock.json +++ b/package-lock.json @@ -43,7 +43,7 @@ "grpc-tools": "1.11.2", "husky": "7.0.4", "jest": "27.4.5", - "semantic-release": "18.0.1", + "semantic-release": "19.0.3", "ts-jest": "27.1.1", "ts-node": "10.4.0", "ts-proto": "1.95.1", @@ -692,6 +692,16 @@ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, "node_modules/@commitlint/cli": { "version": "15.0.0", "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-15.0.0.tgz", @@ -1808,9 +1818,9 @@ } }, "node_modules/@semantic-release/npm": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-8.0.3.tgz", - "integrity": "sha512-Qbg7x/O1t3sJqsv2+U0AL4Utgi/ymlCiUdt67Ftz9HL9N8aDML4t2tE0T9MBaYdqwD976hz57DqHHXKVppUBoA==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-9.0.1.tgz", + "integrity": "sha512-I5nVZklxBzfMFwemhRNbSrkiN/dsH3c7K9+KSk6jUnq0rdLFUuJt7EBsysq4Ir3moajQgFkfEryEHPqiKJj20g==", "dev": true, "dependencies": { "@semantic-release/error": "^3.0.0", @@ -1820,7 +1830,7 @@ "lodash": "^4.17.15", "nerf-dart": "^1.0.0", "normalize-url": "^6.0.0", - "npm": "^7.0.0", + "npm": "^8.3.0", "rc": "^1.2.8", "read-pkg": "^5.0.0", "registry-auth-token": "^4.0.0", @@ -1828,10 +1838,10 @@ "tempy": "^1.0.0" }, "engines": { - "node": ">=14.17" + "node": ">=16 || ^14.17" }, "peerDependencies": { - "semantic-release": ">=18.0.0" + "semantic-release": ">=19.0.0" } }, "node_modules/@semantic-release/release-notes-generator": { @@ -2412,7 +2422,7 @@ "node_modules/ansicolors": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz", - "integrity": "sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk=", + "integrity": "sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg==", "dev": true }, "node_modules/anymatch": { @@ -2867,7 +2877,7 @@ "node_modules/cardinal": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz", - "integrity": "sha1-fMEFXYItISlU0HsIXeolHMe8VQU=", + "integrity": "sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw==", "dev": true, "dependencies": { "ansicolors": "~0.3.2", @@ -2954,19 +2964,18 @@ } }, "node_modules/cli-table3": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.0.tgz", - "integrity": "sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ==", + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.2.tgz", + "integrity": "sha512-QyavHCaIC80cMivimWu4aWHilIpiDpfm3hGmqAmXVL1UsnbLuBSMd21hTX6VY4ZSDSM73ESLeF8TOYId3rBTbw==", "dev": true, "dependencies": { - "object-assign": "^4.1.0", "string-width": "^4.2.0" }, "engines": { "node": "10.* || >= 12.*" }, "optionalDependencies": { - "colors": "^1.1.2" + 
"@colors/colors": "1.5.0" } }, "node_modules/cliui": { @@ -3020,16 +3029,6 @@ "color-support": "bin.js" } }, - "node_modules/colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, "node_modules/combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -3446,9 +3445,9 @@ } }, "node_modules/del": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/del/-/del-6.0.0.tgz", - "integrity": "sha512-1shh9DQ23L16oXSZKB2JxpL7iMy2E0S9d517ptA1P8iw0alkPtQcrKH7ru31rYtKwF499HkTu+DRzq3TCKDFRQ==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", "dev": true, "dependencies": { "globby": "^11.0.1", @@ -7004,32 +7003,74 @@ } }, "node_modules/marked": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/marked/-/marked-2.1.3.tgz", - "integrity": "sha512-/Q+7MGzaETqifOMWYEA7HVMaZb4XbcRfaOzcSsHZEith83KGlvaSG33u0SKu89Mj5h+T8V2hM+8O45Qc5XTgwA==", + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/marked/-/marked-4.0.16.tgz", + "integrity": "sha512-wahonIQ5Jnyatt2fn8KqF/nIqZM8mh3oRu2+l5EANGMhu6RFjiSG52QNE2eWzFMI94HqYSgN184NurgNG6CztA==", "dev": true, "bin": { - "marked": "bin/marked" + "marked": "bin/marked.js" }, "engines": { - "node": ">= 10" + "node": ">= 12" } }, "node_modules/marked-terminal": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-4.2.0.tgz", - "integrity": "sha512-DQfNRV9svZf0Dm9Cf5x5xaVJ1+XjxQW6XjFJ5HFkVyK52SDpj5PCBzS5X5r2w9nHr3mlB0T5201UMLue9fmhUw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-5.1.1.tgz", + "integrity": "sha512-+cKTOx9P4l7HwINYhzbrBSyzgxO2HaHKGZGuB1orZsMIgXYaJyfidT81VXRdpelW/PcHEWxywscePVgI/oUF6g==", "dev": true, "dependencies": { - "ansi-escapes": "^4.3.1", + "ansi-escapes": "^5.0.0", "cardinal": "^2.1.1", - "chalk": "^4.1.0", - "cli-table3": "^0.6.0", - "node-emoji": "^1.10.0", - "supports-hyperlinks": "^2.1.0" + "chalk": "^5.0.0", + "cli-table3": "^0.6.1", + "node-emoji": "^1.11.0", + "supports-hyperlinks": "^2.2.0" + }, + "engines": { + "node": ">=14.13.1 || >=16.0.0" }, "peerDependencies": { - "marked": "^1.0.0 || ^2.0.0" + "marked": "^1.0.0 || ^2.0.0 || ^3.0.0 || ^4.0.0" + } + }, + "node_modules/marked-terminal/node_modules/ansi-escapes": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-5.0.0.tgz", + "integrity": "sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==", + "dev": true, + "dependencies": { + "type-fest": "^1.0.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/marked-terminal/node_modules/chalk": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz", + "integrity": "sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==", + "dev": true, + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + 
"node_modules/marked-terminal/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/meow": { @@ -7279,7 +7320,7 @@ "node_modules/nerf-dart": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz", - "integrity": "sha1-5tq3/r9a2Bbqgc9cYpxaDr3nLBo=", + "integrity": "sha512-EZSPZB70jiVsivaBLYDCyntd5eH8NTSMOn3rB+HxwdmKThGELLdYv8qVIMWvZEFy9w8ZZpW9h9OB32l1rGtj7g==", "dev": true }, "node_modules/nice-grpc": { @@ -7424,20 +7465,19 @@ } }, "node_modules/npm": { - "version": "7.24.2", - "resolved": "https://registry.npmjs.org/npm/-/npm-7.24.2.tgz", - "integrity": "sha512-120p116CE8VMMZ+hk8IAb1inCPk4Dj3VZw29/n2g6UI77urJKVYb7FZUDW8hY+EBnfsjI/2yrobBgFyzo7YpVQ==", + "version": "8.12.1", + "resolved": "https://registry.npmjs.org/npm/-/npm-8.12.1.tgz", + "integrity": "sha512-0yOlhfgu1UzP6UijnaFuIS2bES2H9D90EA5OVsf2iOZw7VBrjntXKEwKfCaFA6vMVWkCP8qnPwCxxPdnDVwlNw==", "bundleDependencies": [ "@isaacs/string-locale-compare", "@npmcli/arborist", "@npmcli/ci-detect", "@npmcli/config", + "@npmcli/fs", "@npmcli/map-workspaces", "@npmcli/package-json", "@npmcli/run-script", "abbrev", - "ansicolors", - "ansistyles", "archy", "cacache", "chalk", @@ -7483,6 +7523,7 @@ "opener", "pacote", "parse-conflict-json", + "proc-log", "qrcode-terminal", "read", "read-package-json", @@ -7501,83 +7542,83 @@ ], "dev": true, "dependencies": { - "@isaacs/string-locale-compare": "*", - "@npmcli/arborist": "*", - "@npmcli/ci-detect": "*", - "@npmcli/config": "*", - "@npmcli/map-workspaces": "*", - "@npmcli/package-json": "*", - "@npmcli/run-script": "*", - "abbrev": "*", - "ansicolors": "*", - "ansistyles": "*", - "archy": "*", - "cacache": "*", - "chalk": "*", - "chownr": "*", - "cli-columns": "*", - "cli-table3": "*", - "columnify": "*", - "fastest-levenshtein": "*", - "glob": "*", - "graceful-fs": "*", - "hosted-git-info": "*", - "ini": "*", - "init-package-json": "*", - "is-cidr": "*", - "json-parse-even-better-errors": "*", - "libnpmaccess": "*", - "libnpmdiff": "*", - "libnpmexec": "*", - "libnpmfund": "*", - "libnpmhook": "*", - "libnpmorg": "*", - "libnpmpack": "*", - "libnpmpublish": "*", - "libnpmsearch": "*", - "libnpmteam": "*", - "libnpmversion": "*", - "make-fetch-happen": "*", - "minipass": "*", - "minipass-pipeline": "*", - "mkdirp": "*", - "mkdirp-infer-owner": "*", - "ms": "*", - "node-gyp": "*", - "nopt": "*", - "npm-audit-report": "*", - "npm-install-checks": "*", - "npm-package-arg": "*", - "npm-pick-manifest": "*", - "npm-profile": "*", - "npm-registry-fetch": "*", - "npm-user-validate": "*", - "npmlog": "*", - "opener": "*", - "pacote": "*", - "parse-conflict-json": "*", - "qrcode-terminal": "*", - "read": "*", - "read-package-json": "*", - "read-package-json-fast": "*", - "readdir-scoped-modules": "*", - "rimraf": "*", - "semver": "*", - "ssri": "*", - "tar": "*", - "text-table": "*", - "tiny-relative-date": "*", - "treeverse": "*", - "validate-npm-package-name": "*", - "which": "*", - "write-file-atomic": "*" + "@isaacs/string-locale-compare": "^1.1.0", + "@npmcli/arborist": "^5.0.4", + "@npmcli/ci-detect": "^2.0.0", + "@npmcli/config": "^4.1.0", + "@npmcli/fs": "^2.1.0", + "@npmcli/map-workspaces": "^2.0.3", + "@npmcli/package-json": "^2.0.0", + 
"@npmcli/run-script": "^3.0.1", + "abbrev": "~1.1.1", + "archy": "~1.0.0", + "cacache": "^16.1.0", + "chalk": "^4.1.2", + "chownr": "^2.0.0", + "cli-columns": "^4.0.0", + "cli-table3": "^0.6.2", + "columnify": "^1.6.0", + "fastest-levenshtein": "^1.0.12", + "glob": "^8.0.1", + "graceful-fs": "^4.2.10", + "hosted-git-info": "^5.0.0", + "ini": "^3.0.0", + "init-package-json": "^3.0.2", + "is-cidr": "^4.0.2", + "json-parse-even-better-errors": "^2.3.1", + "libnpmaccess": "^6.0.2", + "libnpmdiff": "^4.0.2", + "libnpmexec": "^4.0.2", + "libnpmfund": "^3.0.1", + "libnpmhook": "^8.0.2", + "libnpmorg": "^4.0.2", + "libnpmpack": "^4.0.2", + "libnpmpublish": "^6.0.2", + "libnpmsearch": "^5.0.2", + "libnpmteam": "^4.0.2", + "libnpmversion": "^3.0.1", + "make-fetch-happen": "^10.1.6", + "minipass": "^3.1.6", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "mkdirp-infer-owner": "^2.0.0", + "ms": "^2.1.2", + "node-gyp": "^9.0.0", + "nopt": "^5.0.0", + "npm-audit-report": "^3.0.0", + "npm-install-checks": "^5.0.0", + "npm-package-arg": "^9.0.2", + "npm-pick-manifest": "^7.0.1", + "npm-profile": "^6.0.3", + "npm-registry-fetch": "^13.1.1", + "npm-user-validate": "^1.0.1", + "npmlog": "^6.0.2", + "opener": "^1.5.2", + "pacote": "^13.6.0", + "parse-conflict-json": "^2.0.2", + "proc-log": "^2.0.1", + "qrcode-terminal": "^0.12.0", + "read": "~1.0.7", + "read-package-json": "^5.0.1", + "read-package-json-fast": "^2.0.3", + "readdir-scoped-modules": "^1.1.0", + "rimraf": "^3.0.2", + "semver": "^7.3.7", + "ssri": "^9.0.1", + "tar": "^6.1.11", + "text-table": "~0.2.0", + "tiny-relative-date": "^1.3.0", + "treeverse": "^2.0.0", + "validate-npm-package-name": "^4.0.0", + "which": "^2.0.2", + "write-file-atomic": "^4.0.1" }, "bin": { "npm": "bin/npm-cli.js", "npx": "bin/npx-cli.js" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16" } }, "node_modules/npm-run-path": { @@ -7592,8 +7633,18 @@ "node": ">=8" } }, + "node_modules/npm/node_modules/@colors/colors": { + "version": "1.5.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, "node_modules/npm/node_modules/@gar/promisify": { - "version": "1.1.2", + "version": "1.1.3", "dev": true, "inBundle": true, "license": "MIT" @@ -7605,75 +7656,83 @@ "license": "ISC" }, "node_modules/npm/node_modules/@npmcli/arborist": { - "version": "2.9.0", + "version": "5.2.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@isaacs/string-locale-compare": "^1.0.1", + "@isaacs/string-locale-compare": "^1.1.0", "@npmcli/installed-package-contents": "^1.0.7", - "@npmcli/map-workspaces": "^1.0.2", - "@npmcli/metavuln-calculator": "^1.1.0", - "@npmcli/move-file": "^1.1.0", + "@npmcli/map-workspaces": "^2.0.3", + "@npmcli/metavuln-calculator": "^3.0.1", + "@npmcli/move-file": "^2.0.0", "@npmcli/name-from-folder": "^1.0.1", - "@npmcli/node-gyp": "^1.0.1", - "@npmcli/package-json": "^1.0.1", - "@npmcli/run-script": "^1.8.2", - "bin-links": "^2.2.1", - "cacache": "^15.0.3", + "@npmcli/node-gyp": "^2.0.0", + "@npmcli/package-json": "^2.0.0", + "@npmcli/run-script": "^3.0.0", + "bin-links": "^3.0.0", + "cacache": "^16.0.6", "common-ancestor-path": "^1.0.1", "json-parse-even-better-errors": "^2.3.1", "json-stringify-nice": "^1.1.4", "mkdirp": "^1.0.4", "mkdirp-infer-owner": "^2.0.0", - "npm-install-checks": "^4.0.0", - "npm-package-arg": "^8.1.5", - "npm-pick-manifest": "^6.1.0", - "npm-registry-fetch": "^11.0.0", - "pacote": "^11.3.5", - "parse-conflict-json": "^1.1.1", - 
"proc-log": "^1.0.0", + "nopt": "^5.0.0", + "npm-install-checks": "^5.0.0", + "npm-package-arg": "^9.0.0", + "npm-pick-manifest": "^7.0.0", + "npm-registry-fetch": "^13.0.0", + "npmlog": "^6.0.2", + "pacote": "^13.0.5", + "parse-conflict-json": "^2.0.1", + "proc-log": "^2.0.0", "promise-all-reject-late": "^1.0.0", "promise-call-limit": "^1.0.1", "read-package-json-fast": "^2.0.2", "readdir-scoped-modules": "^1.1.0", "rimraf": "^3.0.2", - "semver": "^7.3.5", - "ssri": "^8.0.1", - "treeverse": "^1.0.4", + "semver": "^7.3.7", + "ssri": "^9.0.0", + "treeverse": "^2.0.0", "walk-up-path": "^1.0.0" }, "bin": { "arborist": "bin/index.js" }, "engines": { - "node": ">= 10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/ci-detect": { - "version": "1.3.0", + "version": "2.0.0", "dev": true, "inBundle": true, - "license": "ISC" + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } }, "node_modules/npm/node_modules/@npmcli/config": { - "version": "2.3.0", + "version": "4.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "ini": "^2.0.0", + "@npmcli/map-workspaces": "^2.0.2", + "ini": "^3.0.0", "mkdirp-infer-owner": "^2.0.0", "nopt": "^5.0.0", - "semver": "^7.3.4", + "proc-log": "^2.0.0", + "read-package-json-fast": "^2.0.3", + "semver": "^7.3.5", "walk-up-path": "^1.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/disparity-colors": { - "version": "1.0.1", + "version": "2.0.0", "dev": true, "inBundle": true, "license": "ISC", @@ -7681,33 +7740,40 @@ "ansi-styles": "^4.3.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/fs": { - "version": "1.0.0", + "version": "2.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@gar/promisify": "^1.0.1", + "@gar/promisify": "^1.1.3", "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/git": { - "version": "2.1.0", + "version": "3.0.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/promise-spawn": "^1.3.2", - "lru-cache": "^6.0.0", + "@npmcli/promise-spawn": "^3.0.0", + "lru-cache": "^7.4.4", "mkdirp": "^1.0.4", - "npm-pick-manifest": "^6.1.1", + "npm-pick-manifest": "^7.0.0", + "proc-log": "^2.0.0", "promise-inflight": "^1.0.1", "promise-retry": "^2.0.1", "semver": "^7.3.5", "which": "^2.0.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/installed-package-contents": { @@ -7727,33 +7793,37 @@ } }, "node_modules/npm/node_modules/@npmcli/map-workspaces": { - "version": "1.0.4", + "version": "2.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "@npmcli/name-from-folder": "^1.0.1", - "glob": "^7.1.6", - "minimatch": "^3.0.4", - "read-package-json-fast": "^2.0.1" + "glob": "^8.0.1", + "minimatch": "^5.0.1", + "read-package-json-fast": "^2.0.3" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/metavuln-calculator": { - "version": "1.1.1", + "version": "3.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "cacache": "^15.0.5", - "pacote": "^11.1.11", - "semver": "^7.3.2" + "cacache": "^16.0.0", + "json-parse-even-better-errors": "^2.3.1", + "pacote": "^13.0.3", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || 
^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/move-file": { - "version": "1.1.2", + "version": "2.0.0", "dev": true, "inBundle": true, "license": "MIT", @@ -7762,7 +7832,7 @@ "rimraf": "^3.0.2" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/name-from-folder": { @@ -7772,48 +7842,60 @@ "license": "ISC" }, "node_modules/npm/node_modules/@npmcli/node-gyp": { - "version": "1.0.2", + "version": "2.0.0", "dev": true, "inBundle": true, - "license": "ISC" + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } }, "node_modules/npm/node_modules/@npmcli/package-json": { - "version": "1.0.1", + "version": "2.0.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "json-parse-even-better-errors": "^2.3.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/promise-spawn": { - "version": "1.3.2", + "version": "3.0.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "infer-owner": "^1.0.4" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@npmcli/run-script": { - "version": "1.8.6", + "version": "3.0.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/node-gyp": "^1.0.2", - "@npmcli/promise-spawn": "^1.3.2", - "node-gyp": "^7.1.0", - "read-package-json-fast": "^2.0.1" + "@npmcli/node-gyp": "^2.0.0", + "@npmcli/promise-spawn": "^3.0.0", + "node-gyp": "^9.0.0", + "read-package-json-fast": "^2.0.3" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/@tootallnate/once": { - "version": "1.1.2", + "version": "2.0.0", "dev": true, "inBundle": true, "license": "MIT", "engines": { - "node": ">= 6" + "node": ">= 10" } }, "node_modules/npm/node_modules/abbrev": { @@ -7835,7 +7917,7 @@ } }, "node_modules/npm/node_modules/agentkeepalive": { - "version": "4.1.4", + "version": "4.2.1", "dev": true, "inBundle": true, "license": "MIT", @@ -7861,29 +7943,13 @@ "node": ">=8" } }, - "node_modules/npm/node_modules/ajv": { - "version": "6.12.6", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, "node_modules/npm/node_modules/ansi-regex": { - "version": "2.1.1", + "version": "5.0.1", "dev": true, "inBundle": true, "license": "MIT", "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, "node_modules/npm/node_modules/ansi-styles": { @@ -7901,18 +7967,6 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/npm/node_modules/ansicolors": { - "version": "0.3.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/ansistyles": { - "version": "0.1.3", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/aproba": { "version": "2.0.0", "dev": true, @@ -7926,7 +7980,7 @@ "license": "MIT" }, "node_modules/npm/node_modules/are-we-there-yet": { - "version": "1.1.6", + "version": "3.0.0", "dev": true, "inBundle": true, "license": "ISC", @@ -7935,7 +7989,7 @@ "readable-stream": "^3.6.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16" } }, "node_modules/npm/node_modules/asap": { @@ -7944,75 +7998,27 @@ "inBundle": true, 
"license": "MIT" }, - "node_modules/npm/node_modules/asn1": { - "version": "0.2.4", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/npm/node_modules/assert-plus": { - "version": "1.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/npm/node_modules/asynckit": { - "version": "0.4.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/aws-sign2": { - "version": "0.7.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "engines": { - "node": "*" - } - }, - "node_modules/npm/node_modules/aws4": { - "version": "1.11.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/balanced-match": { "version": "1.0.2", "dev": true, "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "dev": true, - "inBundle": true, - "license": "BSD-3-Clause", - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, "node_modules/npm/node_modules/bin-links": { - "version": "2.2.1", + "version": "3.0.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "cmd-shim": "^4.0.1", - "mkdirp": "^1.0.3", + "cmd-shim": "^5.0.0", + "mkdirp-infer-owner": "^2.0.0", "npm-normalize-package-bin": "^1.0.0", - "read-cmd-shim": "^2.0.0", + "read-cmd-shim": "^3.0.0", "rimraf": "^3.0.0", - "write-file-atomic": "^3.0.3" + "write-file-atomic": "^4.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/binary-extensions": { @@ -8025,56 +8031,52 @@ } }, "node_modules/npm/node_modules/brace-expansion": { - "version": "1.1.11", + "version": "2.0.1", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^1.0.0" } }, "node_modules/npm/node_modules/builtins": { - "version": "1.0.3", + "version": "5.0.1", "dev": true, "inBundle": true, - "license": "MIT" + "license": "MIT", + "dependencies": { + "semver": "^7.0.0" + } }, "node_modules/npm/node_modules/cacache": { - "version": "15.3.0", + "version": "16.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.0.1", + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "glob": "^7.1.4", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", "infer-owner": "^1.0.4", - "lru-cache": "^6.0.0", - "minipass": "^3.1.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", "minipass-collect": "^1.0.2", "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.2", - "mkdirp": "^1.0.3", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", "p-map": "^4.0.0", "promise-inflight": "^1.0.1", "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", "unique-filename": "^1.1.1" }, "engines": { - "node": ">= 10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/npm/node_modules/caseless": { - "version": "0.12.0", - "dev": true, - "inBundle": true, - "license": "Apache-2.0" - }, "node_modules/npm/node_modules/chalk": { "version": "4.1.2", "dev": true, @@ -8122,89 +8124,44 @@ } }, "node_modules/npm/node_modules/cli-columns": { - "version": "3.1.2", + "version": "4.0.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "string-width": "^2.0.0", - "strip-ansi": "^3.0.1" + 
"string-width": "^4.2.3", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">= 4" + "node": ">= 10" } }, "node_modules/npm/node_modules/cli-table3": { - "version": "0.6.0", + "version": "0.6.2", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "object-assign": "^4.1.0", "string-width": "^4.2.0" }, "engines": { "node": "10.* || >= 12.*" }, "optionalDependencies": { - "colors": "^1.1.2" + "@colors/colors": "1.5.0" } }, - "node_modules/npm/node_modules/cli-table3/node_modules/ansi-regex": { - "version": "5.0.0", + "node_modules/npm/node_modules/clone": { + "version": "1.0.4", "dev": true, "inBundle": true, "license": "MIT", "engines": { - "node": ">=8" + "node": ">=0.8" } }, - "node_modules/npm/node_modules/cli-table3/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/cli-table3/node_modules/string-width": { - "version": "4.2.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/cli-table3/node_modules/strip-ansi": { - "version": "6.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/npm/node_modules/clone": { - "version": "1.0.4", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.8" - } - }, - "node_modules/npm/node_modules/cmd-shim": { - "version": "4.1.0", + "node_modules/npm/node_modules/cmd-shim": { + "version": "5.0.0", "dev": true, "inBundle": true, "license": "ISC", @@ -8212,16 +8169,7 @@ "mkdirp-infer-owner": "^2.0.0" }, "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/code-point-at": { - "version": "1.1.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/color-convert": { @@ -8251,36 +8199,17 @@ "color-support": "bin.js" } }, - "node_modules/npm/node_modules/colors": { - "version": "1.4.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, "node_modules/npm/node_modules/columnify": { - "version": "1.5.4", + "version": "1.6.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "strip-ansi": "^3.0.0", + "strip-ansi": "^6.0.1", "wcwidth": "^1.0.0" - } - }, - "node_modules/npm/node_modules/combined-stream": { - "version": "1.0.8", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "delayed-stream": "~1.0.0" }, "engines": { - "node": ">= 0.8" + "node": ">=8.0.0" } }, "node_modules/npm/node_modules/common-ancestor-path": { @@ -8301,26 +8230,8 @@ "inBundle": true, "license": "ISC" }, - "node_modules/npm/node_modules/core-util-is": { - "version": "1.0.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/dashdash": { - "version": "1.14.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, "node_modules/npm/node_modules/debug": { - "version": "4.3.2", + "version": "4.3.4", "dev": true, "inBundle": true, "license": "MIT", @@ -8360,15 +8271,6 @@ "clone": "^1.0.2" } }, - 
"node_modules/npm/node_modules/delayed-stream": { - "version": "1.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.4.0" - } - }, "node_modules/npm/node_modules/delegates": { "version": "1.0.0", "dev": true, @@ -8385,7 +8287,7 @@ } }, "node_modules/npm/node_modules/dezalgo": { - "version": "1.0.3", + "version": "1.0.4", "dev": true, "inBundle": true, "license": "ISC", @@ -8403,16 +8305,6 @@ "node": ">=0.3.1" } }, - "node_modules/npm/node_modules/ecc-jsbn": { - "version": "0.1.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, "node_modules/npm/node_modules/emoji-regex": { "version": "8.0.0", "dev": true, @@ -8444,48 +8336,12 @@ "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/extend": { - "version": "3.0.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/extsprintf": { - "version": "1.3.0", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/fast-deep-equal": { - "version": "3.1.3", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/fastest-levenshtein": { "version": "1.0.12", "dev": true, "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/forever-agent": { - "version": "0.6.1", - "dev": true, - "inBundle": true, - "license": "Apache-2.0", - "engines": { - "node": "*" - } - }, "node_modules/npm/node_modules/fs-minipass": { "version": "2.1.0", "dev": true, @@ -8511,36 +8367,26 @@ "license": "MIT" }, "node_modules/npm/node_modules/gauge": { - "version": "3.0.1", + "version": "4.0.4", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.2", - "console-control-strings": "^1.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", "has-unicode": "^2.0.1", - "object-assign": "^4.1.1", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1 || ^2.0.0", - "strip-ansi": "^3.0.1 || ^4.0.0", - "wide-align": "^1.1.2" + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" }, "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/getpass": { - "version": "0.1.7", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "assert-plus": "^1.0.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/glob": { - "version": "7.2.0", + "version": "8.0.3", "dev": true, "inBundle": true, "license": "ISC", @@ -8548,45 +8394,22 @@ "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "minimatch": "^5.0.1", + "once": "^1.3.0" }, "engines": { - "node": "*" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/npm/node_modules/graceful-fs": { - "version": "4.2.8", + "version": "4.2.10", "dev": true, "inBundle": true, "license": "ISC" }, - "node_modules/npm/node_modules/har-schema": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "engines": { - "node": ">=4" - } - }, - "node_modules/npm/node_modules/har-validator": { - "version": "5.1.5", - "dev": true, - "inBundle": true, - "license": "MIT", - 
"dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/npm/node_modules/has": { "version": "1.0.3", "dev": true, @@ -8615,15 +8438,15 @@ "license": "ISC" }, "node_modules/npm/node_modules/hosted-git-info": { - "version": "4.0.2", + "version": "5.0.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "lru-cache": "^6.0.0" + "lru-cache": "^7.5.1" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16" } }, "node_modules/npm/node_modules/http-cache-semantics": { @@ -8633,12 +8456,12 @@ "license": "BSD-2-Clause" }, "node_modules/npm/node_modules/http-proxy-agent": { - "version": "4.0.1", + "version": "5.0.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "@tootallnate/once": "1", + "@tootallnate/once": "2", "agent-base": "6", "debug": "4" }, @@ -8646,23 +8469,8 @@ "node": ">= 6" } }, - "node_modules/npm/node_modules/http-signature": { - "version": "1.2.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, "node_modules/npm/node_modules/https-proxy-agent": { - "version": "5.0.0", + "version": "5.0.1", "dev": true, "inBundle": true, "license": "MIT", @@ -8697,12 +8505,15 @@ } }, "node_modules/npm/node_modules/ignore-walk": { - "version": "3.0.4", + "version": "5.0.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "minimatch": "^3.0.4" + "minimatch": "^5.0.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/imurmurhash": { @@ -8746,34 +8557,34 @@ "license": "ISC" }, "node_modules/npm/node_modules/ini": { - "version": "2.0.0", + "version": "3.0.0", "dev": true, "inBundle": true, "license": "ISC", "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/init-package-json": { - "version": "2.0.5", + "version": "3.0.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "npm-package-arg": "^8.1.5", + "npm-package-arg": "^9.0.1", "promzard": "^0.3.0", - "read": "~1.0.1", - "read-package-json": "^4.1.1", + "read": "^1.0.7", + "read-package-json": "^5.0.0", "semver": "^7.3.5", "validate-npm-package-license": "^3.0.4", - "validate-npm-package-name": "^3.0.0" + "validate-npm-package-name": "^4.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/ip": { - "version": "1.1.5", + "version": "1.1.8", "dev": true, "inBundle": true, "license": "MIT" @@ -8800,7 +8611,7 @@ } }, "node_modules/npm/node_modules/is-core-module": { - "version": "2.7.0", + "version": "2.9.0", "dev": true, "inBundle": true, "license": "MIT", @@ -8812,12 +8623,12 @@ } }, "node_modules/npm/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", + "version": "3.0.0", "dev": true, "inBundle": true, "license": "MIT", "engines": { - "node": ">=4" + "node": ">=8" } }, "node_modules/npm/node_modules/is-lambda": { @@ -8826,47 +8637,18 @@ "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/is-typedarray": { - "version": "1.0.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/isexe": { "version": "2.0.0", "dev": true, "inBundle": true, "license": "ISC" }, - "node_modules/npm/node_modules/isstream": { - "version": "0.1.2", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - 
"node_modules/npm/node_modules/jsbn": { - "version": "0.1.1", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/json-parse-even-better-errors": { "version": "2.3.1", "dev": true, "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/json-schema": { - "version": "0.2.3", - "dev": true, - "inBundle": true - }, - "node_modules/npm/node_modules/json-schema-traverse": { - "version": "0.4.1", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/json-stringify-nice": { "version": "1.1.4", "dev": true, @@ -8876,12 +8658,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/npm/node_modules/json-stringify-safe": { - "version": "5.0.1", - "dev": true, - "inBundle": true, - "license": "ISC" - }, "node_modules/npm/node_modules/jsonparse": { "version": "1.3.1", "dev": true, @@ -8891,266 +8667,234 @@ "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/jsprim": { - "version": "1.4.1", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "inBundle": true, - "license": "MIT", - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, "node_modules/npm/node_modules/just-diff": { - "version": "3.1.1", + "version": "5.0.2", "dev": true, "inBundle": true, "license": "MIT" }, "node_modules/npm/node_modules/just-diff-apply": { - "version": "3.0.0", + "version": "5.2.0", "dev": true, "inBundle": true, "license": "MIT" }, "node_modules/npm/node_modules/libnpmaccess": { - "version": "4.0.3", + "version": "6.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "aproba": "^2.0.0", "minipass": "^3.1.1", - "npm-package-arg": "^8.1.2", - "npm-registry-fetch": "^11.0.0" + "npm-package-arg": "^9.0.1", + "npm-registry-fetch": "^13.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmdiff": { - "version": "2.0.4", + "version": "4.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/disparity-colors": "^1.0.1", + "@npmcli/disparity-colors": "^2.0.0", "@npmcli/installed-package-contents": "^1.0.7", "binary-extensions": "^2.2.0", "diff": "^5.0.0", - "minimatch": "^3.0.4", - "npm-package-arg": "^8.1.4", - "pacote": "^11.3.4", + "minimatch": "^5.0.1", + "npm-package-arg": "^9.0.1", + "pacote": "^13.0.5", "tar": "^6.1.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmexec": { - "version": "2.0.1", + "version": "4.0.6", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/arborist": "^2.3.0", - "@npmcli/ci-detect": "^1.3.0", - "@npmcli/run-script": "^1.8.4", + "@npmcli/arborist": "^5.0.0", + "@npmcli/ci-detect": "^2.0.0", + "@npmcli/run-script": "^3.0.0", "chalk": "^4.1.0", "mkdirp-infer-owner": "^2.0.0", - "npm-package-arg": "^8.1.2", - "pacote": "^11.3.1", - "proc-log": "^1.0.0", + "npm-package-arg": "^9.0.1", + "npmlog": "^6.0.2", + "pacote": "^13.0.5", + "proc-log": "^2.0.0", "read": "^1.0.7", "read-package-json-fast": "^2.0.2", "walk-up-path": "^1.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmfund": { - "version": "1.1.0", + "version": "3.0.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/arborist": "^2.5.0" + "@npmcli/arborist": "^5.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } 
}, "node_modules/npm/node_modules/libnpmhook": { - "version": "6.0.3", + "version": "8.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmorg": { - "version": "2.0.3", + "version": "4.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmpack": { - "version": "2.0.1", + "version": "4.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/run-script": "^1.8.3", - "npm-package-arg": "^8.1.0", - "pacote": "^11.2.6" + "@npmcli/run-script": "^3.0.0", + "npm-package-arg": "^9.0.1", + "pacote": "^13.5.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmpublish": { - "version": "4.0.2", + "version": "6.0.4", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "normalize-package-data": "^3.0.2", - "npm-package-arg": "^8.1.2", - "npm-registry-fetch": "^11.0.0", - "semver": "^7.1.3", - "ssri": "^8.0.1" + "normalize-package-data": "^4.0.0", + "npm-package-arg": "^9.0.1", + "npm-registry-fetch": "^13.0.0", + "semver": "^7.3.7", + "ssri": "^9.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmsearch": { - "version": "3.1.2", + "version": "5.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmteam": { - "version": "2.0.4", + "version": "4.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/libnpmversion": { - "version": "1.2.1", + "version": "3.0.4", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/git": "^2.0.7", - "@npmcli/run-script": "^1.8.4", + "@npmcli/git": "^3.0.0", + "@npmcli/run-script": "^3.0.0", "json-parse-even-better-errors": "^2.3.1", - "semver": "^7.3.5", - "stringify-package": "^1.0.1" + "proc-log": "^2.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/lru-cache": { - "version": "6.0.0", + "version": "7.9.0", "dev": true, "inBundle": true, "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, "engines": { - "node": ">=10" + "node": ">=12" } }, "node_modules/npm/node_modules/make-fetch-happen": { - "version": "9.1.0", + "version": "10.1.6", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "agentkeepalive": "^4.1.3", - "cacache": "^15.2.0", + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^4.0.1", + "http-proxy-agent": "^5.0.0", "https-proxy-agent": "^5.0.0", "is-lambda": "^1.0.1", - "lru-cache": "^6.0.0", - "minipass": "^3.1.3", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", "minipass-collect": "^1.0.2", - "minipass-fetch": 
"^1.3.2", + "minipass-fetch": "^2.0.3", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.2", + "negotiator": "^0.6.3", "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.0.0", - "ssri": "^8.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/npm/node_modules/mime-db": { - "version": "1.49.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/npm/node_modules/mime-types": { - "version": "2.1.32", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "mime-db": "1.49.0" + "socks-proxy-agent": "^6.1.1", + "ssri": "^9.0.0" }, "engines": { - "node": ">= 0.6" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/minimatch": { - "version": "3.0.4", + "version": "5.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=10" } }, "node_modules/npm/node_modules/minipass": { - "version": "3.1.5", + "version": "3.1.6", "dev": true, "inBundle": true, "license": "ISC", @@ -9174,20 +8918,20 @@ } }, "node_modules/npm/node_modules/minipass-fetch": { - "version": "1.4.1", + "version": "2.1.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "minipass": "^3.1.0", + "minipass": "^3.1.6", "minipass-sized": "^1.0.3", - "minizlib": "^2.0.0" + "minizlib": "^2.1.2" }, "engines": { - "node": ">=8" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" }, "optionalDependencies": { - "encoding": "^0.1.12" + "encoding": "^0.1.13" } }, "node_modules/npm/node_modules/minipass-flush": { @@ -9288,7 +9032,7 @@ "license": "ISC" }, "node_modules/npm/node_modules/negotiator": { - "version": "0.6.2", + "version": "0.6.3", "dev": true, "inBundle": true, "license": "MIT", @@ -9297,87 +9041,69 @@ } }, "node_modules/npm/node_modules/node-gyp": { - "version": "7.1.2", + "version": "9.0.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { "env-paths": "^2.2.0", "glob": "^7.1.4", - "graceful-fs": "^4.2.3", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.0.3", "nopt": "^5.0.0", - "npmlog": "^4.1.2", - "request": "^2.88.2", + "npmlog": "^6.0.0", "rimraf": "^3.0.2", - "semver": "^7.3.2", - "tar": "^6.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", "which": "^2.0.2" }, "bin": { "node-gyp": "bin/node-gyp.js" }, "engines": { - "node": ">= 10.12.0" + "node": "^12.22 || ^14.13 || >=16" } }, - "node_modules/npm/node_modules/node-gyp/node_modules/aproba": { - "version": "1.2.0", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/node-gyp/node_modules/gauge": { - "version": "2.7.4", + "node_modules/npm/node_modules/node-gyp/node_modules/brace-expansion": { + "version": "1.1.11", "dev": true, "inBundle": true, - "license": "ISC", + "license": "MIT", "dependencies": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" } }, - "node_modules/npm/node_modules/node-gyp/node_modules/is-fullwidth-code-point": { - "version": "1.0.0", + "node_modules/npm/node_modules/node-gyp/node_modules/glob": { + "version": "7.2.3", "dev": true, "inBundle": true, - "license": "MIT", + "license": "ISC", "dependencies": { - "number-is-nan": "^1.0.0" + "fs.realpath": "^1.0.0", + "inflight": 
"^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" }, "engines": { - "node": ">=0.10.0" + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/npm/node_modules/node-gyp/node_modules/npmlog": { - "version": "4.1.2", + "node_modules/npm/node_modules/node-gyp/node_modules/minimatch": { + "version": "3.1.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, - "node_modules/npm/node_modules/node-gyp/node_modules/string-width": { - "version": "1.0.2", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" + "brace-expansion": "^1.1.7" }, "engines": { - "node": ">=0.10.0" + "node": "*" } }, "node_modules/npm/node_modules/nopt": { @@ -9396,22 +9122,22 @@ } }, "node_modules/npm/node_modules/normalize-package-data": { - "version": "3.0.3", + "version": "4.0.0", "dev": true, "inBundle": true, "license": "BSD-2-Clause", "dependencies": { - "hosted-git-info": "^4.0.1", - "is-core-module": "^2.5.0", - "semver": "^7.3.4", - "validate-npm-package-license": "^3.0.1" + "hosted-git-info": "^5.0.0", + "is-core-module": "^2.8.1", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16" } }, "node_modules/npm/node_modules/npm-audit-report": { - "version": "2.1.5", + "version": "3.0.0", "dev": true, "inBundle": true, "license": "ISC", @@ -9419,7 +9145,7 @@ "chalk": "^4.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-bundled": { @@ -9432,7 +9158,7 @@ } }, "node_modules/npm/node_modules/npm-install-checks": { - "version": "4.0.0", + "version": "5.0.0", "dev": true, "inBundle": true, "license": "BSD-2-Clause", @@ -9440,7 +9166,7 @@ "semver": "^7.1.1" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-normalize-package-bin": { @@ -9450,134 +9176,102 @@ "license": "ISC" }, "node_modules/npm/node_modules/npm-package-arg": { - "version": "8.1.5", + "version": "9.0.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "hosted-git-info": "^4.0.1", - "semver": "^7.3.4", - "validate-npm-package-name": "^3.0.0" + "hosted-git-info": "^5.0.0", + "semver": "^7.3.5", + "validate-npm-package-name": "^4.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-packlist": { - "version": "2.2.2", + "version": "5.1.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "glob": "^7.1.6", - "ignore-walk": "^3.0.3", - "npm-bundled": "^1.1.1", + "glob": "^8.0.1", + "ignore-walk": "^5.0.1", + "npm-bundled": "^1.1.2", "npm-normalize-package-bin": "^1.0.1" }, "bin": { "npm-packlist": "bin/index.js" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-pick-manifest": { - "version": "6.1.1", + "version": "7.0.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "npm-install-checks": "^4.0.0", + "npm-install-checks": "^5.0.0", "npm-normalize-package-bin": "^1.0.1", - "npm-package-arg": "^8.1.2", - "semver": "^7.3.4" + "npm-package-arg": "^9.0.0", + "semver": "^7.3.5" + }, + "engines": { + 
"node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-profile": { - "version": "5.0.4", + "version": "6.0.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.1", + "proc-log": "^2.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-registry-fetch": { - "version": "11.0.0", + "version": "13.1.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "make-fetch-happen": "^9.0.1", - "minipass": "^3.1.3", - "minipass-fetch": "^1.3.0", + "make-fetch-happen": "^10.0.6", + "minipass": "^3.1.6", + "minipass-fetch": "^2.0.3", "minipass-json-stream": "^1.0.1", - "minizlib": "^2.0.0", - "npm-package-arg": "^8.0.0" + "minizlib": "^2.1.2", + "npm-package-arg": "^9.0.1", + "proc-log": "^2.0.0" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/npm-user-validate": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause" - }, - "node_modules/npm/node_modules/npmlog": { - "version": "5.0.1", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "are-we-there-yet": "^2.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^3.0.0", - "set-blocking": "^2.0.0" - } - }, - "node_modules/npm/node_modules/npmlog/node_modules/are-we-there-yet": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "ISC", - "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/npm/node_modules/number-is-nan": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/npm/node_modules/oauth-sign": { - "version": "0.9.0", + "version": "1.0.1", "dev": true, "inBundle": true, - "license": "Apache-2.0", - "engines": { - "node": "*" - } + "license": "BSD-2-Clause" }, - "node_modules/npm/node_modules/object-assign": { - "version": "4.1.1", + "node_modules/npm/node_modules/npmlog": { + "version": "6.0.2", "dev": true, "inBundle": true, - "license": "MIT", + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^3.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^4.0.3", + "set-blocking": "^2.0.0" + }, "engines": { - "node": ">=0.10.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/once": { @@ -9614,47 +9308,52 @@ } }, "node_modules/npm/node_modules/pacote": { - "version": "11.3.5", + "version": "13.6.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "@npmcli/git": "^2.1.0", - "@npmcli/installed-package-contents": "^1.0.6", - "@npmcli/promise-spawn": "^1.2.0", - "@npmcli/run-script": "^1.8.2", - "cacache": "^15.0.5", + "@npmcli/git": "^3.0.0", + "@npmcli/installed-package-contents": "^1.0.7", + "@npmcli/promise-spawn": "^3.0.0", + "@npmcli/run-script": "^3.0.1", + "cacache": "^16.0.0", "chownr": "^2.0.0", "fs-minipass": "^2.1.0", "infer-owner": "^1.0.4", - "minipass": "^3.1.3", - "mkdirp": "^1.0.3", - "npm-package-arg": "^8.0.1", - "npm-packlist": "^2.1.4", - "npm-pick-manifest": "^6.0.0", - "npm-registry-fetch": "^11.0.0", + "minipass": "^3.1.6", + "mkdirp": "^1.0.4", + "npm-package-arg": "^9.0.0", + "npm-packlist": "^5.1.0", + "npm-pick-manifest": "^7.0.0", + "npm-registry-fetch": "^13.0.1", + "proc-log": "^2.0.0", "promise-retry": "^2.0.1", - "read-package-json-fast": "^2.0.1", 
+ "read-package-json": "^5.0.0", + "read-package-json-fast": "^2.0.3", "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.1.0" + "ssri": "^9.0.0", + "tar": "^6.1.11" }, "bin": { "pacote": "lib/bin.js" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/parse-conflict-json": { - "version": "1.1.1", + "version": "2.0.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "json-parse-even-better-errors": "^2.3.0", - "just-diff": "^3.0.1", - "just-diff-apply": "^3.0.0" + "json-parse-even-better-errors": "^2.3.1", + "just-diff": "^5.0.1", + "just-diff-apply": "^5.2.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/path-is-absolute": { @@ -9666,17 +9365,14 @@ "node": ">=0.10.0" } }, - "node_modules/npm/node_modules/performance-now": { - "version": "2.1.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/proc-log": { - "version": "1.0.0", + "version": "2.0.1", "dev": true, "inBundle": true, - "license": "ISC" + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } }, "node_modules/npm/node_modules/promise-all-reject-late": { "version": "1.0.1", @@ -9724,21 +9420,6 @@ "read": "1" } }, - "node_modules/npm/node_modules/psl": { - "version": "1.8.0", - "dev": true, - "inBundle": true, - "license": "MIT" - }, - "node_modules/npm/node_modules/punycode": { - "version": "2.1.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/npm/node_modules/qrcode-terminal": { "version": "0.12.0", "dev": true, @@ -9747,15 +9428,6 @@ "qrcode-terminal": "bin/qrcode-terminal.js" } }, - "node_modules/npm/node_modules/qs": { - "version": "6.5.2", - "dev": true, - "inBundle": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.6" - } - }, "node_modules/npm/node_modules/read": { "version": "1.0.7", "dev": true, @@ -9769,24 +9441,27 @@ } }, "node_modules/npm/node_modules/read-cmd-shim": { - "version": "2.0.0", + "version": "3.0.0", "dev": true, "inBundle": true, - "license": "ISC" + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } }, "node_modules/npm/node_modules/read-package-json": { - "version": "4.1.1", + "version": "5.0.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "glob": "^7.1.1", - "json-parse-even-better-errors": "^2.3.0", - "normalize-package-data": "^3.0.0", - "npm-normalize-package-bin": "^1.0.0" + "glob": "^8.0.1", + "json-parse-even-better-errors": "^2.3.1", + "normalize-package-data": "^4.0.0", + "npm-normalize-package-bin": "^1.0.1" }, "engines": { - "node": ">=10" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/read-package-json-fast": { @@ -9828,86 +9503,70 @@ "once": "^1.3.0" } }, - "node_modules/npm/node_modules/request": { - "version": "2.88.2", + "node_modules/npm/node_modules/retry": { + "version": "0.12.0", "dev": true, "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - 
"tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, + "license": "MIT", "engines": { - "node": ">= 6" + "node": ">= 4" } }, - "node_modules/npm/node_modules/request/node_modules/form-data": { - "version": "2.3.3", + "node_modules/npm/node_modules/rimraf": { + "version": "3.0.2", "dev": true, "inBundle": true, - "license": "MIT", + "license": "ISC", "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" + "glob": "^7.1.3" }, - "engines": { - "node": ">= 0.12" + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/npm/node_modules/request/node_modules/tough-cookie": { - "version": "2.5.0", + "node_modules/npm/node_modules/rimraf/node_modules/brace-expansion": { + "version": "1.1.11", "dev": true, "inBundle": true, - "license": "BSD-3-Clause", + "license": "MIT", "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" } }, - "node_modules/npm/node_modules/retry": { - "version": "0.12.0", + "node_modules/npm/node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", "dev": true, "inBundle": true, - "license": "MIT", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, "engines": { - "node": ">= 4" + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/npm/node_modules/rimraf": { - "version": "3.0.2", + "node_modules/npm/node_modules/rimraf/node_modules/minimatch": { + "version": "3.1.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" + "brace-expansion": "^1.1.7" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "engines": { + "node": "*" } }, "node_modules/npm/node_modules/safe-buffer": { @@ -9934,10 +9593,11 @@ "version": "2.1.2", "dev": true, "inBundle": true, - "license": "MIT" + "license": "MIT", + "optional": true }, "node_modules/npm/node_modules/semver": { - "version": "7.3.5", + "version": "7.3.7", "dev": true, "inBundle": true, "license": "ISC", @@ -9951,6 +9611,18 @@ "node": ">=10" } }, + "node_modules/npm/node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/npm/node_modules/set-blocking": { "version": "2.0.0", "dev": true, @@ -9958,7 +9630,7 @@ "license": "ISC" }, "node_modules/npm/node_modules/signal-exit": { - "version": "3.0.3", + "version": "3.0.7", "dev": true, "inBundle": true, "license": "ISC" @@ -9974,13 +9646,13 @@ } }, "node_modules/npm/node_modules/socks": { - "version": "2.6.1", + "version": "2.6.2", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { "ip": "^1.1.5", - "smart-buffer": "^4.1.0" + "smart-buffer": "^4.2.0" }, "engines": { "node": ">= 10.13.0", @@ -9988,14 +9660,14 @@ } }, "node_modules/npm/node_modules/socks-proxy-agent": { - "version": "6.1.0", + "version": "6.2.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { "agent-base": "^6.0.2", - "debug": "^4.3.1", - "socks": "^2.6.1" + "debug": "^4.3.3", + "socks": "^2.6.2" }, "engines": { "node": ">= 10" @@ -10028,38 +9700,13 @@ } }, "node_modules/npm/node_modules/spdx-license-ids": { - "version": 
"3.0.10", + "version": "3.0.11", "dev": true, "inBundle": true, "license": "CC0-1.0" }, - "node_modules/npm/node_modules/sshpk": { - "version": "1.16.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "bin": { - "sshpk-conv": "bin/sshpk-conv", - "sshpk-sign": "bin/sshpk-sign", - "sshpk-verify": "bin/sshpk-verify" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/npm/node_modules/ssri": { - "version": "8.0.1", + "version": "9.0.1", "dev": true, "inBundle": true, "license": "ISC", @@ -10067,7 +9714,7 @@ "minipass": "^3.1.1" }, "engines": { - "node": ">= 8" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/string_decoder": { @@ -10080,55 +9727,29 @@ } }, "node_modules/npm/node_modules/string-width": { - "version": "2.1.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/npm/node_modules/string-width/node_modules/ansi-regex": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, - "node_modules/npm/node_modules/string-width/node_modules/strip-ansi": { - "version": "4.0.0", + "version": "4.2.3", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "ansi-regex": "^3.0.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=4" + "node": ">=8" } }, - "node_modules/npm/node_modules/stringify-package": { - "version": "1.0.1", - "dev": true, - "inBundle": true, - "license": "ISC" - }, "node_modules/npm/node_modules/strip-ansi": { - "version": "3.0.1", + "version": "6.0.1", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "ansi-regex": "^2.0.0" + "ansi-regex": "^5.0.1" }, "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, "node_modules/npm/node_modules/supports-color": { @@ -10173,36 +9794,12 @@ "license": "MIT" }, "node_modules/npm/node_modules/treeverse": { - "version": "1.0.4", - "dev": true, - "inBundle": true, - "license": "ISC" - }, - "node_modules/npm/node_modules/tunnel-agent": { - "version": "0.6.0", + "version": "2.0.0", "dev": true, "inBundle": true, - "license": "Apache-2.0", - "dependencies": { - "safe-buffer": "^5.0.1" - }, + "license": "ISC", "engines": { - "node": "*" - } - }, - "node_modules/npm/node_modules/tweetnacl": { - "version": "0.14.5", - "dev": true, - "inBundle": true, - "license": "Unlicense" - }, - "node_modules/npm/node_modules/typedarray-to-buffer": { - "version": "3.1.5", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "is-typedarray": "^1.0.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/unique-filename": { @@ -10223,30 +9820,12 @@ "imurmurhash": "^0.1.4" } }, - "node_modules/npm/node_modules/uri-js": { - "version": "4.4.1", - "dev": true, - "inBundle": true, - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, "node_modules/npm/node_modules/util-deprecate": { "version": "1.0.2", "dev": true, "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/uuid": { - "version": "3.4.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "bin": { - 
"uuid": "bin/uuid" - } - }, "node_modules/npm/node_modules/validate-npm-package-license": { "version": "3.0.4", "dev": true, @@ -10258,26 +9837,15 @@ } }, "node_modules/npm/node_modules/validate-npm-package-name": { - "version": "3.0.0", + "version": "4.0.0", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "builtins": "^1.0.3" - } - }, - "node_modules/npm/node_modules/verror": { - "version": "1.10.0", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "inBundle": true, - "license": "MIT", - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" + "builtins": "^5.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/walk-up-path": { @@ -10311,12 +9879,12 @@ } }, "node_modules/npm/node_modules/wide-align": { - "version": "1.1.3", + "version": "1.1.5", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "string-width": "^1.0.2 || 2" + "string-width": "^1.0.2 || 2 || 3 || 4" } }, "node_modules/npm/node_modules/wrappy": { @@ -10326,15 +9894,16 @@ "license": "ISC" }, "node_modules/npm/node_modules/write-file-atomic": { - "version": "3.0.3", + "version": "4.0.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" } }, "node_modules/npm/node_modules/yallist": { @@ -10364,15 +9933,6 @@ "integrity": "sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==", "dev": true }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/object-hash": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-1.3.1.tgz", @@ -10983,7 +10543,7 @@ "node_modules/rc/node_modules/strip-json-comments": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", "dev": true, "engines": { "node": ">=0.10.0" @@ -11075,7 +10635,7 @@ "node_modules/redeyed": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz", - "integrity": "sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs=", + "integrity": "sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ==", "dev": true, "dependencies": { "esprima": "~4.0.0" @@ -11292,15 +10852,15 @@ } }, "node_modules/semantic-release": { - "version": "18.0.1", - "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-18.0.1.tgz", - "integrity": "sha512-xTdKCaEnCzHr+Fqyhg/5I8P9pvY9z7WHa8TFCYIwcdPbuzAtQShOTzw3VNPsqBT+Yq1kFyBQFBKBYkGOlqWmfA==", + "version": "19.0.3", + "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-19.0.3.tgz", + "integrity": "sha512-HaFbydST1cDKZHuFZxB8DTrBLJVK/AnDExpK0s3EqLIAAUAHUgnd+VSJCUtTYQKkAkauL8G9CucODrVCc7BuAA==", "dev": true, "dependencies": { "@semantic-release/commit-analyzer": "^9.0.2", "@semantic-release/error": "^3.0.0", "@semantic-release/github": "^8.0.0", - "@semantic-release/npm": "^8.0.0", + 
"@semantic-release/npm": "^9.0.0", "@semantic-release/release-notes-generator": "^10.0.0", "aggregate-error": "^3.0.0", "cosmiconfig": "^7.0.0", @@ -11314,8 +10874,8 @@ "hook-std": "^2.0.0", "hosted-git-info": "^4.0.0", "lodash": "^4.17.21", - "marked": "^2.0.0", - "marked-terminal": "^4.1.1", + "marked": "^4.0.10", + "marked-terminal": "^5.0.0", "micromatch": "^4.0.2", "p-each-series": "^2.1.0", "p-reduce": "^2.0.0", @@ -11330,7 +10890,7 @@ "semantic-release": "bin/semantic-release.js" }, "engines": { - "node": ">=14.17" + "node": ">=16 || ^14.17" } }, "node_modules/semantic-release/node_modules/hosted-git-info": { @@ -13180,6 +12740,13 @@ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true }, + "@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "optional": true + }, "@commitlint/cli": { "version": "15.0.0", "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-15.0.0.tgz", @@ -14069,9 +13636,9 @@ } }, "@semantic-release/npm": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-8.0.3.tgz", - "integrity": "sha512-Qbg7x/O1t3sJqsv2+U0AL4Utgi/ymlCiUdt67Ftz9HL9N8aDML4t2tE0T9MBaYdqwD976hz57DqHHXKVppUBoA==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-9.0.1.tgz", + "integrity": "sha512-I5nVZklxBzfMFwemhRNbSrkiN/dsH3c7K9+KSk6jUnq0rdLFUuJt7EBsysq4Ir3moajQgFkfEryEHPqiKJj20g==", "dev": true, "requires": { "@semantic-release/error": "^3.0.0", @@ -14081,7 +13648,7 @@ "lodash": "^4.17.15", "nerf-dart": "^1.0.0", "normalize-url": "^6.0.0", - "npm": "^7.0.0", + "npm": "^8.3.0", "rc": "^1.2.8", "read-pkg": "^5.0.0", "registry-auth-token": "^4.0.0", @@ -14541,7 +14108,7 @@ "ansicolors": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz", - "integrity": "sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk=", + "integrity": "sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg==", "dev": true }, "anymatch": { @@ -14900,7 +14467,7 @@ "cardinal": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz", - "integrity": "sha1-fMEFXYItISlU0HsIXeolHMe8VQU=", + "integrity": "sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw==", "dev": true, "requires": { "ansicolors": "~0.3.2", @@ -14965,13 +14532,12 @@ "dev": true }, "cli-table3": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.0.tgz", - "integrity": "sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ==", + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.2.tgz", + "integrity": "sha512-QyavHCaIC80cMivimWu4aWHilIpiDpfm3hGmqAmXVL1UsnbLuBSMd21hTX6VY4ZSDSM73ESLeF8TOYId3rBTbw==", "dev": true, "requires": { - "colors": "^1.1.2", - "object-assign": "^4.1.0", + "@colors/colors": "1.5.0", "string-width": "^4.2.0" } }, @@ -15016,13 +14582,6 @@ "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", "dev": true }, - "colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": 
"sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", - "dev": true, - "optional": true - }, "combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -15358,9 +14917,9 @@ } }, "del": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/del/-/del-6.0.0.tgz", - "integrity": "sha512-1shh9DQ23L16oXSZKB2JxpL7iMy2E0S9d517ptA1P8iw0alkPtQcrKH7ru31rYtKwF499HkTu+DRzq3TCKDFRQ==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", "dev": true, "requires": { "globby": "^11.0.1", @@ -18097,23 +17656,46 @@ "dev": true }, "marked": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/marked/-/marked-2.1.3.tgz", - "integrity": "sha512-/Q+7MGzaETqifOMWYEA7HVMaZb4XbcRfaOzcSsHZEith83KGlvaSG33u0SKu89Mj5h+T8V2hM+8O45Qc5XTgwA==", + "version": "4.0.16", + "resolved": "https://registry.npmjs.org/marked/-/marked-4.0.16.tgz", + "integrity": "sha512-wahonIQ5Jnyatt2fn8KqF/nIqZM8mh3oRu2+l5EANGMhu6RFjiSG52QNE2eWzFMI94HqYSgN184NurgNG6CztA==", "dev": true }, "marked-terminal": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-4.2.0.tgz", - "integrity": "sha512-DQfNRV9svZf0Dm9Cf5x5xaVJ1+XjxQW6XjFJ5HFkVyK52SDpj5PCBzS5X5r2w9nHr3mlB0T5201UMLue9fmhUw==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-5.1.1.tgz", + "integrity": "sha512-+cKTOx9P4l7HwINYhzbrBSyzgxO2HaHKGZGuB1orZsMIgXYaJyfidT81VXRdpelW/PcHEWxywscePVgI/oUF6g==", "dev": true, "requires": { - "ansi-escapes": "^4.3.1", + "ansi-escapes": "^5.0.0", "cardinal": "^2.1.1", - "chalk": "^4.1.0", - "cli-table3": "^0.6.0", - "node-emoji": "^1.10.0", - "supports-hyperlinks": "^2.1.0" + "chalk": "^5.0.0", + "cli-table3": "^0.6.1", + "node-emoji": "^1.11.0", + "supports-hyperlinks": "^2.2.0" + }, + "dependencies": { + "ansi-escapes": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-5.0.0.tgz", + "integrity": "sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==", + "dev": true, + "requires": { + "type-fest": "^1.0.2" + } + }, + "chalk": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz", + "integrity": "sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==", + "dev": true + }, + "type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "dev": true + } } }, "meow": { @@ -18302,7 +17884,7 @@ "nerf-dart": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz", - "integrity": "sha1-5tq3/r9a2Bbqgc9cYpxaDr3nLBo=", + "integrity": "sha512-EZSPZB70jiVsivaBLYDCyntd5eH8NTSMOn3rB+HxwdmKThGELLdYv8qVIMWvZEFy9w8ZZpW9h9OB32l1rGtj7g==", "dev": true }, "nice-grpc": { @@ -18424,85 +18006,91 @@ "dev": true }, "npm": { - "version": "7.24.2", - "resolved": "https://registry.npmjs.org/npm/-/npm-7.24.2.tgz", - "integrity": "sha512-120p116CE8VMMZ+hk8IAb1inCPk4Dj3VZw29/n2g6UI77urJKVYb7FZUDW8hY+EBnfsjI/2yrobBgFyzo7YpVQ==", - "dev": true, - "requires": { - "@isaacs/string-locale-compare": "*", - "@npmcli/arborist": "*", - "@npmcli/ci-detect": "*", 
- "@npmcli/config": "*", - "@npmcli/map-workspaces": "*", - "@npmcli/package-json": "*", - "@npmcli/run-script": "*", - "abbrev": "*", - "ansicolors": "*", - "ansistyles": "*", - "archy": "*", - "cacache": "*", - "chalk": "*", - "chownr": "*", - "cli-columns": "*", - "cli-table3": "*", - "columnify": "*", - "fastest-levenshtein": "*", - "glob": "*", - "graceful-fs": "*", - "hosted-git-info": "*", - "ini": "*", - "init-package-json": "*", - "is-cidr": "*", - "json-parse-even-better-errors": "*", - "libnpmaccess": "*", - "libnpmdiff": "*", - "libnpmexec": "*", - "libnpmfund": "*", - "libnpmhook": "*", - "libnpmorg": "*", - "libnpmpack": "*", - "libnpmpublish": "*", - "libnpmsearch": "*", - "libnpmteam": "*", - "libnpmversion": "*", - "make-fetch-happen": "*", - "minipass": "*", - "minipass-pipeline": "*", - "mkdirp": "*", - "mkdirp-infer-owner": "*", - "ms": "*", - "node-gyp": "*", - "nopt": "*", - "npm-audit-report": "*", - "npm-install-checks": "*", - "npm-package-arg": "*", - "npm-pick-manifest": "*", - "npm-profile": "*", - "npm-registry-fetch": "*", - "npm-user-validate": "*", - "npmlog": "*", - "opener": "*", - "pacote": "*", - "parse-conflict-json": "*", - "qrcode-terminal": "*", - "read": "*", - "read-package-json": "*", - "read-package-json-fast": "*", - "readdir-scoped-modules": "*", - "rimraf": "*", - "semver": "*", - "ssri": "*", - "tar": "*", - "text-table": "*", - "tiny-relative-date": "*", - "treeverse": "*", - "validate-npm-package-name": "*", - "which": "*", - "write-file-atomic": "*" - }, - "dependencies": { + "version": "8.12.1", + "resolved": "https://registry.npmjs.org/npm/-/npm-8.12.1.tgz", + "integrity": "sha512-0yOlhfgu1UzP6UijnaFuIS2bES2H9D90EA5OVsf2iOZw7VBrjntXKEwKfCaFA6vMVWkCP8qnPwCxxPdnDVwlNw==", + "dev": true, + "requires": { + "@isaacs/string-locale-compare": "^1.1.0", + "@npmcli/arborist": "^5.0.4", + "@npmcli/ci-detect": "^2.0.0", + "@npmcli/config": "^4.1.0", + "@npmcli/fs": "^2.1.0", + "@npmcli/map-workspaces": "^2.0.3", + "@npmcli/package-json": "^2.0.0", + "@npmcli/run-script": "^3.0.1", + "abbrev": "~1.1.1", + "archy": "~1.0.0", + "cacache": "^16.1.0", + "chalk": "^4.1.2", + "chownr": "^2.0.0", + "cli-columns": "^4.0.0", + "cli-table3": "^0.6.2", + "columnify": "^1.6.0", + "fastest-levenshtein": "^1.0.12", + "glob": "^8.0.1", + "graceful-fs": "^4.2.10", + "hosted-git-info": "^5.0.0", + "ini": "^3.0.0", + "init-package-json": "^3.0.2", + "is-cidr": "^4.0.2", + "json-parse-even-better-errors": "^2.3.1", + "libnpmaccess": "^6.0.2", + "libnpmdiff": "^4.0.2", + "libnpmexec": "^4.0.2", + "libnpmfund": "^3.0.1", + "libnpmhook": "^8.0.2", + "libnpmorg": "^4.0.2", + "libnpmpack": "^4.0.2", + "libnpmpublish": "^6.0.2", + "libnpmsearch": "^5.0.2", + "libnpmteam": "^4.0.2", + "libnpmversion": "^3.0.1", + "make-fetch-happen": "^10.1.6", + "minipass": "^3.1.6", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "mkdirp-infer-owner": "^2.0.0", + "ms": "^2.1.2", + "node-gyp": "^9.0.0", + "nopt": "^5.0.0", + "npm-audit-report": "^3.0.0", + "npm-install-checks": "^5.0.0", + "npm-package-arg": "^9.0.2", + "npm-pick-manifest": "^7.0.1", + "npm-profile": "^6.0.3", + "npm-registry-fetch": "^13.1.1", + "npm-user-validate": "^1.0.1", + "npmlog": "^6.0.2", + "opener": "^1.5.2", + "pacote": "^13.6.0", + "parse-conflict-json": "^2.0.2", + "proc-log": "^2.0.1", + "qrcode-terminal": "^0.12.0", + "read": "~1.0.7", + "read-package-json": "^5.0.1", + "read-package-json-fast": "^2.0.3", + "readdir-scoped-modules": "^1.1.0", + "rimraf": "^3.0.2", + "semver": "^7.3.7", + "ssri": 
"^9.0.1", + "tar": "^6.1.11", + "text-table": "~0.2.0", + "tiny-relative-date": "^1.3.0", + "treeverse": "^2.0.0", + "validate-npm-package-name": "^4.0.0", + "which": "^2.0.2", + "write-file-atomic": "^4.0.1" + }, + "dependencies": { + "@colors/colors": { + "version": "1.5.0", + "bundled": true, + "dev": true, + "optional": true + }, "@gar/promisify": { - "version": "1.1.2", + "version": "1.1.3", "bundled": true, "dev": true }, @@ -18512,63 +18100,68 @@ "dev": true }, "@npmcli/arborist": { - "version": "2.9.0", + "version": "5.2.1", "bundled": true, "dev": true, "requires": { - "@isaacs/string-locale-compare": "^1.0.1", + "@isaacs/string-locale-compare": "^1.1.0", "@npmcli/installed-package-contents": "^1.0.7", - "@npmcli/map-workspaces": "^1.0.2", - "@npmcli/metavuln-calculator": "^1.1.0", - "@npmcli/move-file": "^1.1.0", + "@npmcli/map-workspaces": "^2.0.3", + "@npmcli/metavuln-calculator": "^3.0.1", + "@npmcli/move-file": "^2.0.0", "@npmcli/name-from-folder": "^1.0.1", - "@npmcli/node-gyp": "^1.0.1", - "@npmcli/package-json": "^1.0.1", - "@npmcli/run-script": "^1.8.2", - "bin-links": "^2.2.1", - "cacache": "^15.0.3", + "@npmcli/node-gyp": "^2.0.0", + "@npmcli/package-json": "^2.0.0", + "@npmcli/run-script": "^3.0.0", + "bin-links": "^3.0.0", + "cacache": "^16.0.6", "common-ancestor-path": "^1.0.1", "json-parse-even-better-errors": "^2.3.1", "json-stringify-nice": "^1.1.4", "mkdirp": "^1.0.4", "mkdirp-infer-owner": "^2.0.0", - "npm-install-checks": "^4.0.0", - "npm-package-arg": "^8.1.5", - "npm-pick-manifest": "^6.1.0", - "npm-registry-fetch": "^11.0.0", - "pacote": "^11.3.5", - "parse-conflict-json": "^1.1.1", - "proc-log": "^1.0.0", + "nopt": "^5.0.0", + "npm-install-checks": "^5.0.0", + "npm-package-arg": "^9.0.0", + "npm-pick-manifest": "^7.0.0", + "npm-registry-fetch": "^13.0.0", + "npmlog": "^6.0.2", + "pacote": "^13.0.5", + "parse-conflict-json": "^2.0.1", + "proc-log": "^2.0.0", "promise-all-reject-late": "^1.0.0", "promise-call-limit": "^1.0.1", "read-package-json-fast": "^2.0.2", "readdir-scoped-modules": "^1.1.0", "rimraf": "^3.0.2", - "semver": "^7.3.5", - "ssri": "^8.0.1", - "treeverse": "^1.0.4", + "semver": "^7.3.7", + "ssri": "^9.0.0", + "treeverse": "^2.0.0", "walk-up-path": "^1.0.0" } }, "@npmcli/ci-detect": { - "version": "1.3.0", + "version": "2.0.0", "bundled": true, "dev": true }, "@npmcli/config": { - "version": "2.3.0", + "version": "4.1.0", "bundled": true, "dev": true, "requires": { - "ini": "^2.0.0", + "@npmcli/map-workspaces": "^2.0.2", + "ini": "^3.0.0", "mkdirp-infer-owner": "^2.0.0", "nopt": "^5.0.0", - "semver": "^7.3.4", + "proc-log": "^2.0.0", + "read-package-json-fast": "^2.0.3", + "semver": "^7.3.5", "walk-up-path": "^1.0.0" } }, "@npmcli/disparity-colors": { - "version": "1.0.1", + "version": "2.0.0", "bundled": true, "dev": true, "requires": { @@ -18576,23 +18169,24 @@ } }, "@npmcli/fs": { - "version": "1.0.0", + "version": "2.1.0", "bundled": true, "dev": true, "requires": { - "@gar/promisify": "^1.0.1", + "@gar/promisify": "^1.1.3", "semver": "^7.3.5" } }, "@npmcli/git": { - "version": "2.1.0", + "version": "3.0.1", "bundled": true, "dev": true, "requires": { - "@npmcli/promise-spawn": "^1.3.2", - "lru-cache": "^6.0.0", + "@npmcli/promise-spawn": "^3.0.0", + "lru-cache": "^7.4.4", "mkdirp": "^1.0.4", - "npm-pick-manifest": "^6.1.1", + "npm-pick-manifest": "^7.0.0", + "proc-log": "^2.0.0", "promise-inflight": "^1.0.1", "promise-retry": "^2.0.1", "semver": "^7.3.5", @@ -18609,28 +18203,29 @@ } }, "@npmcli/map-workspaces": { - "version": "1.0.4", + 
"version": "2.0.3", "bundled": true, "dev": true, "requires": { "@npmcli/name-from-folder": "^1.0.1", - "glob": "^7.1.6", - "minimatch": "^3.0.4", - "read-package-json-fast": "^2.0.1" + "glob": "^8.0.1", + "minimatch": "^5.0.1", + "read-package-json-fast": "^2.0.3" } }, "@npmcli/metavuln-calculator": { - "version": "1.1.1", + "version": "3.1.0", "bundled": true, "dev": true, "requires": { - "cacache": "^15.0.5", - "pacote": "^11.1.11", - "semver": "^7.3.2" + "cacache": "^16.0.0", + "json-parse-even-better-errors": "^2.3.1", + "pacote": "^13.0.3", + "semver": "^7.3.5" } }, "@npmcli/move-file": { - "version": "1.1.2", + "version": "2.0.0", "bundled": true, "dev": true, "requires": { @@ -18644,12 +18239,12 @@ "dev": true }, "@npmcli/node-gyp": { - "version": "1.0.2", + "version": "2.0.0", "bundled": true, "dev": true }, "@npmcli/package-json": { - "version": "1.0.1", + "version": "2.0.0", "bundled": true, "dev": true, "requires": { @@ -18657,7 +18252,7 @@ } }, "@npmcli/promise-spawn": { - "version": "1.3.2", + "version": "3.0.0", "bundled": true, "dev": true, "requires": { @@ -18665,18 +18260,18 @@ } }, "@npmcli/run-script": { - "version": "1.8.6", + "version": "3.0.2", "bundled": true, "dev": true, "requires": { - "@npmcli/node-gyp": "^1.0.2", - "@npmcli/promise-spawn": "^1.3.2", - "node-gyp": "^7.1.0", - "read-package-json-fast": "^2.0.1" + "@npmcli/node-gyp": "^2.0.0", + "@npmcli/promise-spawn": "^3.0.0", + "node-gyp": "^9.0.0", + "read-package-json-fast": "^2.0.3" } }, "@tootallnate/once": { - "version": "1.1.2", + "version": "2.0.0", "bundled": true, "dev": true }, @@ -18694,7 +18289,7 @@ } }, "agentkeepalive": { - "version": "4.1.4", + "version": "4.2.1", "bundled": true, "dev": true, "requires": { @@ -18712,19 +18307,8 @@ "indent-string": "^4.0.0" } }, - "ajv": { - "version": "6.12.6", - "bundled": true, - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, "ansi-regex": { - "version": "2.1.1", + "version": "5.0.1", "bundled": true, "dev": true }, @@ -18736,16 +18320,6 @@ "color-convert": "^2.0.1" } }, - "ansicolors": { - "version": "0.3.2", - "bundled": true, - "dev": true - }, - "ansistyles": { - "version": "0.1.3", - "bundled": true, - "dev": true - }, "aproba": { "version": "2.0.0", "bundled": true, @@ -18757,7 +18331,7 @@ "dev": true }, "are-we-there-yet": { - "version": "1.1.6", + "version": "3.0.0", "bundled": true, "dev": true, "requires": { @@ -18770,58 +18344,22 @@ "bundled": true, "dev": true }, - "asn1": { - "version": "0.2.4", - "bundled": true, - "dev": true, - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "assert-plus": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "asynckit": { - "version": "0.4.0", - "bundled": true, - "dev": true - }, - "aws-sign2": { - "version": "0.7.0", - "bundled": true, - "dev": true - }, - "aws4": { - "version": "1.11.0", - "bundled": true, - "dev": true - }, "balanced-match": { "version": "1.0.2", "bundled": true, "dev": true }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "tweetnacl": "^0.14.3" - } - }, "bin-links": { - "version": "2.2.1", + "version": "3.0.1", "bundled": true, "dev": true, "requires": { - "cmd-shim": "^4.0.1", - "mkdirp": "^1.0.3", + "cmd-shim": "^5.0.0", + "mkdirp-infer-owner": "^2.0.0", "npm-normalize-package-bin": "^1.0.0", - "read-cmd-shim": "^2.0.0", + "read-cmd-shim": "^3.0.0", "rimraf": "^3.0.0", - "write-file-atomic": "^3.0.3" + 
"write-file-atomic": "^4.0.0" } }, "binary-extensions": { @@ -18830,49 +18368,46 @@ "dev": true }, "brace-expansion": { - "version": "1.1.11", + "version": "2.0.1", "bundled": true, "dev": true, "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^1.0.0" } }, "builtins": { - "version": "1.0.3", + "version": "5.0.1", "bundled": true, - "dev": true + "dev": true, + "requires": { + "semver": "^7.0.0" + } }, "cacache": { - "version": "15.3.0", + "version": "16.1.0", "bundled": true, "dev": true, "requires": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.0.1", + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "glob": "^7.1.4", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", "infer-owner": "^1.0.4", - "lru-cache": "^6.0.0", - "minipass": "^3.1.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", "minipass-collect": "^1.0.2", "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.2", - "mkdirp": "^1.0.3", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", "p-map": "^4.0.0", "promise-inflight": "^1.0.1", "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", "unique-filename": "^1.1.1" } }, - "caseless": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, "chalk": { "version": "4.1.2", "bundled": true, @@ -18901,52 +18436,21 @@ "dev": true }, "cli-columns": { - "version": "3.1.2", + "version": "4.0.0", "bundled": true, "dev": true, "requires": { - "string-width": "^2.0.0", - "strip-ansi": "^3.0.1" + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" } }, "cli-table3": { - "version": "0.6.0", + "version": "0.6.2", "bundled": true, "dev": true, "requires": { - "colors": "^1.1.2", - "object-assign": "^4.1.0", + "@colors/colors": "1.5.0", "string-width": "^4.2.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "bundled": true, - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "4.2.2", - "bundled": true, - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } } }, "clone": { @@ -18955,18 +18459,13 @@ "dev": true }, "cmd-shim": { - "version": "4.1.0", + "version": "5.0.0", "bundled": true, "dev": true, "requires": { "mkdirp-infer-owner": "^2.0.0" } }, - "code-point-at": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, "color-convert": { "version": "2.0.1", "bundled": true, @@ -18985,29 +18484,15 @@ "bundled": true, "dev": true }, - "colors": { - "version": "1.4.0", - "bundled": true, - "dev": true, - "optional": true - }, "columnify": { - "version": "1.5.4", + "version": "1.6.0", "bundled": true, "dev": true, "requires": { - "strip-ansi": "^3.0.0", + "strip-ansi": "^6.0.1", "wcwidth": "^1.0.0" } }, - "combined-stream": { - "version": "1.0.8", - "bundled": true, - "dev": true, - "requires": { - "delayed-stream": "~1.0.0" - } - }, "common-ancestor-path": { "version": "1.0.1", "bundled": true, @@ -19023,21 +18508,8 @@ "bundled": true, "dev": true }, - "core-util-is": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "dashdash": { - "version": "1.14.1", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "^1.0.0" - } - }, "debug": { - "version": "4.3.2", + "version": "4.3.4", "bundled": true, "dev": 
true, "requires": { @@ -19064,11 +18536,6 @@ "clone": "^1.0.2" } }, - "delayed-stream": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, "delegates": { "version": "1.0.0", "bundled": true, @@ -19080,7 +18547,7 @@ "dev": true }, "dezalgo": { - "version": "1.0.3", + "version": "1.0.4", "bundled": true, "dev": true, "requires": { @@ -19093,15 +18560,6 @@ "bundled": true, "dev": true }, - "ecc-jsbn": { - "version": "0.1.2", - "bundled": true, - "dev": true, - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, "emoji-regex": { "version": "8.0.0", "bundled": true, @@ -19126,36 +18584,11 @@ "bundled": true, "dev": true }, - "extend": { - "version": "3.0.2", - "bundled": true, - "dev": true - }, - "extsprintf": { - "version": "1.3.0", - "bundled": true, - "dev": true - }, - "fast-deep-equal": { - "version": "3.1.3", - "bundled": true, - "dev": true - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, "fastest-levenshtein": { "version": "1.0.12", "bundled": true, "dev": true }, - "forever-agent": { - "version": "0.6.1", - "bundled": true, - "dev": true - }, "fs-minipass": { "version": "2.1.0", "bundled": true, @@ -19175,61 +18608,37 @@ "dev": true }, "gauge": { - "version": "3.0.1", + "version": "4.0.4", "bundled": true, "dev": true, "requires": { "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.2", - "console-control-strings": "^1.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", "has-unicode": "^2.0.1", - "object-assign": "^4.1.1", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1 || ^2.0.0", - "strip-ansi": "^3.0.1 || ^4.0.0", - "wide-align": "^1.1.2" - } - }, - "getpass": { - "version": "0.1.7", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "^1.0.0" + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" } }, "glob": { - "version": "7.2.0", + "version": "8.0.3", "bundled": true, "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "minimatch": "^5.0.1", + "once": "^1.3.0" } }, "graceful-fs": { - "version": "4.2.8", - "bundled": true, - "dev": true - }, - "har-schema": { - "version": "2.0.0", + "version": "4.2.10", "bundled": true, "dev": true }, - "har-validator": { - "version": "5.1.5", - "bundled": true, - "dev": true, - "requires": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - } - }, "has": { "version": "1.0.3", "bundled": true, @@ -19249,11 +18658,11 @@ "dev": true }, "hosted-git-info": { - "version": "4.0.2", + "version": "5.0.0", "bundled": true, "dev": true, "requires": { - "lru-cache": "^6.0.0" + "lru-cache": "^7.5.1" } }, "http-cache-semantics": { @@ -19261,28 +18670,18 @@ "bundled": true, "dev": true }, - "http-proxy-agent": { - "version": "4.0.1", - "bundled": true, - "dev": true, - "requires": { - "@tootallnate/once": "1", - "agent-base": "6", - "debug": "4" - } - }, - "http-signature": { - "version": "1.2.0", + "http-proxy-agent": { + "version": "5.0.0", "bundled": true, "dev": true, "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" } }, "https-proxy-agent": { - "version": "5.0.0", + "version": "5.0.1", "bundled": true, "dev": true, "requires": { @@ -19308,11 +18707,11 @@ } }, "ignore-walk": { - "version": "3.0.4", + "version": "5.0.1", "bundled": true, "dev": true, "requires": { - 
"minimatch": "^3.0.4" + "minimatch": "^5.0.1" } }, "imurmurhash": { @@ -19345,26 +18744,26 @@ "dev": true }, "ini": { - "version": "2.0.0", + "version": "3.0.0", "bundled": true, "dev": true }, "init-package-json": { - "version": "2.0.5", + "version": "3.0.2", "bundled": true, "dev": true, "requires": { - "npm-package-arg": "^8.1.5", + "npm-package-arg": "^9.0.1", "promzard": "^0.3.0", - "read": "~1.0.1", - "read-package-json": "^4.1.1", + "read": "^1.0.7", + "read-package-json": "^5.0.0", "semver": "^7.3.5", "validate-npm-package-license": "^3.0.4", - "validate-npm-package-name": "^3.0.0" + "validate-npm-package-name": "^4.0.0" } }, "ip": { - "version": "1.1.5", + "version": "1.1.8", "bundled": true, "dev": true }, @@ -19382,7 +18781,7 @@ } }, "is-core-module": { - "version": "2.7.0", + "version": "2.9.0", "bundled": true, "dev": true, "requires": { @@ -19390,7 +18789,7 @@ } }, "is-fullwidth-code-point": { - "version": "2.0.0", + "version": "3.0.0", "bundled": true, "dev": true }, @@ -19399,252 +18798,196 @@ "bundled": true, "dev": true }, - "is-typedarray": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, "isexe": { "version": "2.0.0", "bundled": true, "dev": true }, - "isstream": { - "version": "0.1.2", - "bundled": true, - "dev": true - }, - "jsbn": { - "version": "0.1.1", - "bundled": true, - "dev": true - }, "json-parse-even-better-errors": { "version": "2.3.1", "bundled": true, "dev": true }, - "json-schema": { - "version": "0.2.3", - "bundled": true, - "dev": true - }, - "json-schema-traverse": { - "version": "0.4.1", - "bundled": true, - "dev": true - }, "json-stringify-nice": { "version": "1.1.4", "bundled": true, "dev": true }, - "json-stringify-safe": { - "version": "5.0.1", - "bundled": true, - "dev": true - }, "jsonparse": { "version": "1.3.1", "bundled": true, "dev": true }, - "jsprim": { - "version": "1.4.1", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, "just-diff": { - "version": "3.1.1", + "version": "5.0.2", "bundled": true, "dev": true }, "just-diff-apply": { - "version": "3.0.0", + "version": "5.2.0", "bundled": true, "dev": true }, "libnpmaccess": { - "version": "4.0.3", + "version": "6.0.3", "bundled": true, "dev": true, "requires": { "aproba": "^2.0.0", "minipass": "^3.1.1", - "npm-package-arg": "^8.1.2", - "npm-registry-fetch": "^11.0.0" + "npm-package-arg": "^9.0.1", + "npm-registry-fetch": "^13.0.0" } }, "libnpmdiff": { - "version": "2.0.4", + "version": "4.0.3", "bundled": true, "dev": true, "requires": { - "@npmcli/disparity-colors": "^1.0.1", + "@npmcli/disparity-colors": "^2.0.0", "@npmcli/installed-package-contents": "^1.0.7", "binary-extensions": "^2.2.0", "diff": "^5.0.0", - "minimatch": "^3.0.4", - "npm-package-arg": "^8.1.4", - "pacote": "^11.3.4", + "minimatch": "^5.0.1", + "npm-package-arg": "^9.0.1", + "pacote": "^13.0.5", "tar": "^6.1.0" } }, "libnpmexec": { - "version": "2.0.1", + "version": "4.0.6", "bundled": true, "dev": true, "requires": { - "@npmcli/arborist": "^2.3.0", - "@npmcli/ci-detect": "^1.3.0", - "@npmcli/run-script": "^1.8.4", + "@npmcli/arborist": "^5.0.0", + "@npmcli/ci-detect": "^2.0.0", + "@npmcli/run-script": "^3.0.0", "chalk": "^4.1.0", "mkdirp-infer-owner": "^2.0.0", - "npm-package-arg": "^8.1.2", - "pacote": "^11.3.1", - "proc-log": "^1.0.0", + "npm-package-arg": "^9.0.1", + "npmlog": "^6.0.2", + "pacote": "^13.0.5", + "proc-log": "^2.0.0", "read": "^1.0.7", "read-package-json-fast": "^2.0.2", 
"walk-up-path": "^1.0.0" } }, "libnpmfund": { - "version": "1.1.0", + "version": "3.0.2", "bundled": true, "dev": true, "requires": { - "@npmcli/arborist": "^2.5.0" + "@npmcli/arborist": "^5.0.0" } }, "libnpmhook": { - "version": "6.0.3", + "version": "8.0.3", "bundled": true, "dev": true, "requires": { "aproba": "^2.0.0", - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" } }, "libnpmorg": { - "version": "2.0.3", + "version": "4.0.3", "bundled": true, "dev": true, "requires": { "aproba": "^2.0.0", - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" } }, "libnpmpack": { - "version": "2.0.1", + "version": "4.1.0", "bundled": true, "dev": true, "requires": { - "@npmcli/run-script": "^1.8.3", - "npm-package-arg": "^8.1.0", - "pacote": "^11.2.6" + "@npmcli/run-script": "^3.0.0", + "npm-package-arg": "^9.0.1", + "pacote": "^13.5.0" } }, "libnpmpublish": { - "version": "4.0.2", + "version": "6.0.4", "bundled": true, "dev": true, "requires": { - "normalize-package-data": "^3.0.2", - "npm-package-arg": "^8.1.2", - "npm-registry-fetch": "^11.0.0", - "semver": "^7.1.3", - "ssri": "^8.0.1" + "normalize-package-data": "^4.0.0", + "npm-package-arg": "^9.0.1", + "npm-registry-fetch": "^13.0.0", + "semver": "^7.3.7", + "ssri": "^9.0.0" } }, "libnpmsearch": { - "version": "3.1.2", + "version": "5.0.3", "bundled": true, "dev": true, "requires": { - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" } }, "libnpmteam": { - "version": "2.0.4", + "version": "4.0.3", "bundled": true, "dev": true, "requires": { "aproba": "^2.0.0", - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.0" } }, "libnpmversion": { - "version": "1.2.1", + "version": "3.0.4", "bundled": true, "dev": true, "requires": { - "@npmcli/git": "^2.0.7", - "@npmcli/run-script": "^1.8.4", + "@npmcli/git": "^3.0.0", + "@npmcli/run-script": "^3.0.0", "json-parse-even-better-errors": "^2.3.1", - "semver": "^7.3.5", - "stringify-package": "^1.0.1" + "proc-log": "^2.0.0", + "semver": "^7.3.7" } }, "lru-cache": { - "version": "6.0.0", + "version": "7.9.0", "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } + "dev": true }, "make-fetch-happen": { - "version": "9.1.0", + "version": "10.1.6", "bundled": true, "dev": true, "requires": { - "agentkeepalive": "^4.1.3", - "cacache": "^15.2.0", + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^4.0.1", + "http-proxy-agent": "^5.0.0", "https-proxy-agent": "^5.0.0", "is-lambda": "^1.0.1", - "lru-cache": "^6.0.0", - "minipass": "^3.1.3", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", "minipass-collect": "^1.0.2", - "minipass-fetch": "^1.3.2", + "minipass-fetch": "^2.0.3", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.2", + "negotiator": "^0.6.3", "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.0.0", - "ssri": "^8.0.0" - } - }, - "mime-db": { - "version": "1.49.0", - "bundled": true, - "dev": true - }, - "mime-types": { - "version": "2.1.32", - "bundled": true, - "dev": true, - "requires": { - "mime-db": "1.49.0" + "socks-proxy-agent": "^6.1.1", + "ssri": "^9.0.0" } }, "minimatch": { - "version": "3.0.4", + "version": "5.1.0", "bundled": true, "dev": true, "requires": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" } }, "minipass": { - "version": "3.1.5", + "version": "3.1.6", "bundled": true, "dev": true, "requires": { @@ -19660,14 +19003,14 @@ } }, "minipass-fetch": { - "version": "1.4.1", + "version": "2.1.0", 
"bundled": true, "dev": true, "requires": { - "encoding": "^0.1.12", - "minipass": "^3.1.0", + "encoding": "^0.1.13", + "minipass": "^3.1.6", "minipass-sized": "^1.0.3", - "minizlib": "^2.0.0" + "minizlib": "^2.1.2" } }, "minipass-flush": { @@ -19738,74 +19081,55 @@ "dev": true }, "negotiator": { - "version": "0.6.2", + "version": "0.6.3", "bundled": true, "dev": true }, "node-gyp": { - "version": "7.1.2", + "version": "9.0.0", "bundled": true, "dev": true, "requires": { "env-paths": "^2.2.0", "glob": "^7.1.4", - "graceful-fs": "^4.2.3", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.0.3", "nopt": "^5.0.0", - "npmlog": "^4.1.2", - "request": "^2.88.2", + "npmlog": "^6.0.0", "rimraf": "^3.0.2", - "semver": "^7.3.2", - "tar": "^6.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", "which": "^2.0.2" }, "dependencies": { - "aproba": { - "version": "1.2.0", - "bundled": true, - "dev": true - }, - "gauge": { - "version": "2.7.4", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" - } - }, - "is-fullwidth-code-point": { - "version": "1.0.0", + "brace-expansion": { + "version": "1.1.11", "bundled": true, "dev": true, "requires": { - "number-is-nan": "^1.0.0" + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" } }, - "npmlog": { - "version": "4.1.2", + "glob": { + "version": "7.2.3", "bundled": true, "dev": true, "requires": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" } }, - "string-width": { - "version": "1.0.2", + "minimatch": { + "version": "3.1.2", "bundled": true, "dev": true, "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" + "brace-expansion": "^1.1.7" } } } @@ -19819,18 +19143,18 @@ } }, "normalize-package-data": { - "version": "3.0.3", + "version": "4.0.0", "bundled": true, "dev": true, "requires": { - "hosted-git-info": "^4.0.1", - "is-core-module": "^2.5.0", - "semver": "^7.3.4", - "validate-npm-package-license": "^3.0.1" + "hosted-git-info": "^5.0.0", + "is-core-module": "^2.8.1", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" } }, "npm-audit-report": { - "version": "2.1.5", + "version": "3.0.0", "bundled": true, "dev": true, "requires": { @@ -19846,7 +19170,7 @@ } }, "npm-install-checks": { - "version": "4.0.0", + "version": "5.0.0", "bundled": true, "dev": true, "requires": { @@ -19859,56 +19183,58 @@ "dev": true }, "npm-package-arg": { - "version": "8.1.5", + "version": "9.0.2", "bundled": true, "dev": true, "requires": { - "hosted-git-info": "^4.0.1", - "semver": "^7.3.4", - "validate-npm-package-name": "^3.0.0" + "hosted-git-info": "^5.0.0", + "semver": "^7.3.5", + "validate-npm-package-name": "^4.0.0" } }, "npm-packlist": { - "version": "2.2.2", + "version": "5.1.0", "bundled": true, "dev": true, "requires": { - "glob": "^7.1.6", - "ignore-walk": "^3.0.3", - "npm-bundled": "^1.1.1", + "glob": "^8.0.1", + "ignore-walk": "^5.0.1", + "npm-bundled": "^1.1.2", "npm-normalize-package-bin": "^1.0.1" } }, "npm-pick-manifest": { - "version": "6.1.1", + "version": "7.0.1", "bundled": true, "dev": true, "requires": { - "npm-install-checks": "^4.0.0", + "npm-install-checks": 
"^5.0.0", "npm-normalize-package-bin": "^1.0.1", - "npm-package-arg": "^8.1.2", - "semver": "^7.3.4" + "npm-package-arg": "^9.0.0", + "semver": "^7.3.5" } }, "npm-profile": { - "version": "5.0.4", + "version": "6.0.3", "bundled": true, "dev": true, "requires": { - "npm-registry-fetch": "^11.0.0" + "npm-registry-fetch": "^13.0.1", + "proc-log": "^2.0.0" } }, "npm-registry-fetch": { - "version": "11.0.0", + "version": "13.1.1", "bundled": true, "dev": true, "requires": { - "make-fetch-happen": "^9.0.1", - "minipass": "^3.1.3", - "minipass-fetch": "^1.3.0", + "make-fetch-happen": "^10.0.6", + "minipass": "^3.1.6", + "minipass-fetch": "^2.0.3", "minipass-json-stream": "^1.0.1", - "minizlib": "^2.0.0", - "npm-package-arg": "^8.0.0" + "minizlib": "^2.1.2", + "npm-package-arg": "^9.0.1", + "proc-log": "^2.0.0" } }, "npm-user-validate": { @@ -19917,42 +19243,16 @@ "dev": true }, "npmlog": { - "version": "5.0.1", + "version": "6.0.2", "bundled": true, "dev": true, "requires": { - "are-we-there-yet": "^2.0.0", + "are-we-there-yet": "^3.0.0", "console-control-strings": "^1.1.0", - "gauge": "^3.0.0", + "gauge": "^4.0.3", "set-blocking": "^2.0.0" - }, - "dependencies": { - "are-we-there-yet": { - "version": "2.0.0", - "bundled": true, - "dev": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" - } - } } }, - "number-is-nan": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "oauth-sign": { - "version": "0.9.0", - "bundled": true, - "dev": true - }, - "object-assign": { - "version": "4.1.1", - "bundled": true, - "dev": true - }, "once": { "version": "1.4.0", "bundled": true, @@ -19975,39 +19275,41 @@ } }, "pacote": { - "version": "11.3.5", + "version": "13.6.0", "bundled": true, "dev": true, "requires": { - "@npmcli/git": "^2.1.0", - "@npmcli/installed-package-contents": "^1.0.6", - "@npmcli/promise-spawn": "^1.2.0", - "@npmcli/run-script": "^1.8.2", - "cacache": "^15.0.5", + "@npmcli/git": "^3.0.0", + "@npmcli/installed-package-contents": "^1.0.7", + "@npmcli/promise-spawn": "^3.0.0", + "@npmcli/run-script": "^3.0.1", + "cacache": "^16.0.0", "chownr": "^2.0.0", "fs-minipass": "^2.1.0", "infer-owner": "^1.0.4", - "minipass": "^3.1.3", - "mkdirp": "^1.0.3", - "npm-package-arg": "^8.0.1", - "npm-packlist": "^2.1.4", - "npm-pick-manifest": "^6.0.0", - "npm-registry-fetch": "^11.0.0", + "minipass": "^3.1.6", + "mkdirp": "^1.0.4", + "npm-package-arg": "^9.0.0", + "npm-packlist": "^5.1.0", + "npm-pick-manifest": "^7.0.0", + "npm-registry-fetch": "^13.0.1", + "proc-log": "^2.0.0", "promise-retry": "^2.0.1", - "read-package-json-fast": "^2.0.1", + "read-package-json": "^5.0.0", + "read-package-json-fast": "^2.0.3", "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.1.0" + "ssri": "^9.0.0", + "tar": "^6.1.11" } }, "parse-conflict-json": { - "version": "1.1.1", + "version": "2.0.2", "bundled": true, "dev": true, "requires": { - "json-parse-even-better-errors": "^2.3.0", - "just-diff": "^3.0.1", - "just-diff-apply": "^3.0.0" + "json-parse-even-better-errors": "^2.3.1", + "just-diff": "^5.0.1", + "just-diff-apply": "^5.2.0" } }, "path-is-absolute": { @@ -20015,13 +19317,8 @@ "bundled": true, "dev": true }, - "performance-now": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, "proc-log": { - "version": "1.0.0", + "version": "2.0.1", "bundled": true, "dev": true }, @@ -20057,26 +19354,11 @@ "read": "1" } }, - "psl": { - "version": "1.8.0", - "bundled": true, - "dev": true - }, - "punycode": { - "version": "2.1.1", - "bundled": true, - "dev": true - }, 
"qrcode-terminal": { "version": "0.12.0", "bundled": true, "dev": true }, - "qs": { - "version": "6.5.2", - "bundled": true, - "dev": true - }, "read": { "version": "1.0.7", "bundled": true, @@ -20086,19 +19368,19 @@ } }, "read-cmd-shim": { - "version": "2.0.0", + "version": "3.0.0", "bundled": true, "dev": true }, "read-package-json": { - "version": "4.1.1", + "version": "5.0.1", "bundled": true, "dev": true, "requires": { - "glob": "^7.1.1", - "json-parse-even-better-errors": "^2.3.0", - "normalize-package-data": "^3.0.0", - "npm-normalize-package-bin": "^1.0.0" + "glob": "^8.0.1", + "json-parse-even-better-errors": "^2.3.1", + "normalize-package-data": "^4.0.0", + "npm-normalize-package-bin": "^1.0.1" } }, "read-package-json-fast": { @@ -20131,67 +19413,51 @@ "once": "^1.3.0" } }, - "request": { - "version": "2.88.2", + "retry": { + "version": "0.12.0", + "bundled": true, + "dev": true + }, + "rimraf": { + "version": "3.0.2", "bundled": true, "dev": true, "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" + "glob": "^7.1.3" }, "dependencies": { - "form-data": { - "version": "2.3.3", + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "glob": { + "version": "7.2.3", "bundled": true, "dev": true, "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" } }, - "tough-cookie": { - "version": "2.5.0", + "minimatch": { + "version": "3.1.2", "bundled": true, "dev": true, "requires": { - "psl": "^1.1.28", - "punycode": "^2.1.1" + "brace-expansion": "^1.1.7" } } } }, - "retry": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, - "rimraf": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, "safe-buffer": { "version": "5.2.1", "bundled": true, @@ -20200,14 +19466,25 @@ "safer-buffer": { "version": "2.1.2", "bundled": true, - "dev": true + "dev": true, + "optional": true }, "semver": { - "version": "7.3.5", + "version": "7.3.7", "bundled": true, "dev": true, "requires": { "lru-cache": "^6.0.0" + }, + "dependencies": { + "lru-cache": { + "version": "6.0.0", + "bundled": true, + "dev": true, + "requires": { + "yallist": "^4.0.0" + } + } } }, "set-blocking": { @@ -20216,7 +19493,7 @@ "dev": true }, "signal-exit": { - "version": "3.0.3", + "version": "3.0.7", "bundled": true, "dev": true }, @@ -20226,22 +19503,22 @@ "dev": true }, "socks": { - "version": "2.6.1", + "version": "2.6.2", "bundled": true, "dev": true, "requires": { "ip": "^1.1.5", - "smart-buffer": "^4.1.0" + "smart-buffer": "^4.2.0" } }, "socks-proxy-agent": { - "version": "6.1.0", + "version": "6.2.0", "bundled": true, "dev": true, "requires": { "agent-base": "^6.0.2", - "debug": "^4.3.1", - "socks": "^2.6.1" + "debug": "^4.3.3", + "socks": "^2.6.2" } }, "spdx-correct": { @@ -20268,28 +19545,12 @@ } }, 
"spdx-license-ids": { - "version": "3.0.10", + "version": "3.0.11", "bundled": true, "dev": true }, - "sshpk": { - "version": "1.16.1", - "bundled": true, - "dev": true, - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, "ssri": { - "version": "8.0.1", + "version": "9.0.1", "bundled": true, "dev": true, "requires": { @@ -20305,40 +19566,21 @@ } }, "string-width": { - "version": "2.1.1", + "version": "4.2.3", "bundled": true, "dev": true, "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "strip-ansi": { - "version": "4.0.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - } + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" } }, - "stringify-package": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, "strip-ansi": { - "version": "3.0.1", + "version": "6.0.1", "bundled": true, "dev": true, "requires": { - "ansi-regex": "^2.0.0" + "ansi-regex": "^5.0.1" } }, "supports-color": { @@ -20373,31 +19615,10 @@ "dev": true }, "treeverse": { - "version": "1.0.4", - "bundled": true, - "dev": true - }, - "tunnel-agent": { - "version": "0.6.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", + "version": "2.0.0", "bundled": true, "dev": true }, - "typedarray-to-buffer": { - "version": "3.1.5", - "bundled": true, - "dev": true, - "requires": { - "is-typedarray": "^1.0.0" - } - }, "unique-filename": { "version": "1.1.1", "bundled": true, @@ -20414,24 +19635,11 @@ "imurmurhash": "^0.1.4" } }, - "uri-js": { - "version": "4.4.1", - "bundled": true, - "dev": true, - "requires": { - "punycode": "^2.1.0" - } - }, "util-deprecate": { "version": "1.0.2", "bundled": true, "dev": true }, - "uuid": { - "version": "3.4.0", - "bundled": true, - "dev": true - }, "validate-npm-package-license": { "version": "3.0.4", "bundled": true, @@ -20442,21 +19650,11 @@ } }, "validate-npm-package-name": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "builtins": "^1.0.3" - } - }, - "verror": { - "version": "1.10.0", + "version": "4.0.0", "bundled": true, "dev": true, "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" + "builtins": "^5.0.0" } }, "walk-up-path": { @@ -20481,11 +19679,11 @@ } }, "wide-align": { - "version": "1.1.3", + "version": "1.1.5", "bundled": true, "dev": true, "requires": { - "string-width": "^1.0.2 || 2" + "string-width": "^1.0.2 || 2 || 3 || 4" } }, "wrappy": { @@ -20494,14 +19692,12 @@ "dev": true }, "write-file-atomic": { - "version": "3.0.3", + "version": "4.0.1", "bundled": true, "dev": true, "requires": { "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" + "signal-exit": "^3.0.7" } }, "yallist": { @@ -20538,12 +19734,6 @@ "integrity": "sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==", "dev": true }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true - }, "object-hash": { "version": "1.3.1", "resolved": 
"https://registry.npmjs.org/object-hash/-/object-hash-1.3.1.tgz", @@ -20978,7 +20168,7 @@ "strip-json-comments": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", "dev": true } } @@ -21052,7 +20242,7 @@ "redeyed": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz", - "integrity": "sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs=", + "integrity": "sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ==", "dev": true, "requires": { "esprima": "~4.0.0" @@ -21195,15 +20385,15 @@ } }, "semantic-release": { - "version": "18.0.1", - "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-18.0.1.tgz", - "integrity": "sha512-xTdKCaEnCzHr+Fqyhg/5I8P9pvY9z7WHa8TFCYIwcdPbuzAtQShOTzw3VNPsqBT+Yq1kFyBQFBKBYkGOlqWmfA==", + "version": "19.0.3", + "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-19.0.3.tgz", + "integrity": "sha512-HaFbydST1cDKZHuFZxB8DTrBLJVK/AnDExpK0s3EqLIAAUAHUgnd+VSJCUtTYQKkAkauL8G9CucODrVCc7BuAA==", "dev": true, "requires": { "@semantic-release/commit-analyzer": "^9.0.2", "@semantic-release/error": "^3.0.0", "@semantic-release/github": "^8.0.0", - "@semantic-release/npm": "^8.0.0", + "@semantic-release/npm": "^9.0.0", "@semantic-release/release-notes-generator": "^10.0.0", "aggregate-error": "^3.0.0", "cosmiconfig": "^7.0.0", @@ -21217,8 +20407,8 @@ "hook-std": "^2.0.0", "hosted-git-info": "^4.0.0", "lodash": "^4.17.21", - "marked": "^2.0.0", - "marked-terminal": "^4.1.1", + "marked": "^4.0.10", + "marked-terminal": "^5.0.0", "micromatch": "^4.0.2", "p-each-series": "^2.1.0", "p-reduce": "^2.0.0", diff --git a/package.json b/package.json index 5b4cee42..125fd9fa 100644 --- a/package.json +++ b/package.json @@ -56,7 +56,7 @@ "grpc-tools": "1.11.2", "husky": "7.0.4", "jest": "27.4.5", - "semantic-release": "18.0.1", + "semantic-release": "19.0.3", "ts-jest": "27.1.1", "ts-node": "10.4.0", "ts-proto": "1.95.1", From 9893514ecf38c667b174167a0b215ffe61292a39 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Tue, 6 Sep 2022 14:00:19 +0300 Subject: [PATCH 29/54] feat: update api accordind to changes in proto Added services: - yandex.cloud.compute.v1.SnapshotScheduleService - yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService - yandex.cloud.iot.devices.v1.RegistryService - yandex.cloud.iot.broker.v1.BrokerDataService - yandex.cloud.iot.broker.v1.BrokerService - yandex.cloud.organizationmanager.v1.GroupService - yandex.cloud.monitoring.v3.DashboardService --- cloudapi | 2 +- scripts/services.ts | 8 + src/generated/yandex/cloud/access/access.ts | 89 + src/generated/yandex/cloud/ai/stt/v3/stt.ts | 162 +- .../ai/translate/v2/translation_service.ts | 2 +- src/generated/yandex/cloud/ai/tts/v3/tts.ts | 21 +- .../cloud/ai/vision/v1/vision_service.ts | 2 +- .../cloud/apploadbalancer/v1/load_balancer.ts | 8 +- .../cloud/apploadbalancer/v1/virtual_host.ts | 14 + .../cloud/billing/v1/customer_service.ts | 149 + src/generated/yandex/cloud/cdn/v1/origin.ts | 8 +- .../certificatemanager/v1/certificate.ts | 118 + .../v1/certificate_content_service.ts | 69 + .../v1/certificate_service.ts | 293 ++ src/generated/yandex/cloud/compute/index.ts | 2 + src/generated/yandex/cloud/compute/v1/disk.ts | 4 +- .../yandex/cloud/compute/v1/disk_service.ts | 293 ++ 
.../yandex/cloud/compute/v1/image.ts | 4 +- .../yandex/cloud/compute/v1/image_service.ts | 6 +- .../yandex/cloud/compute/v1/instance.ts | 186 +- .../cloud/compute/v1/instance_service.ts | 287 +- .../v1/instancegroup/instance_group.ts | 16 +- .../yandex/cloud/compute/v1/snapshot.ts | 4 +- .../cloud/compute/v1/snapshot_schedule.ts | 798 +++ .../compute/v1/snapshot_schedule_service.ts | 3100 ++++++++++++ .../dataproc/manager/v1/manager_service.ts | 150 + .../yandex/cloud/dataproc/v1/subcluster.ts | 4 +- .../cloud/datatransfer/v1/endpoint/mysql.ts | 2 +- .../datatransfer/v1/endpoint/postgres.ts | 2 +- .../yandex/cloud/dns/v1/dns_zone_service.ts | 110 +- src/generated/yandex/cloud/index.ts | 1 + .../yandex/cloud/iot/broker/v1/broker.ts | 596 +++ .../iot/broker/v1/broker_data_service.ts | 325 ++ .../cloud/iot/broker/v1/broker_service.ts | 3089 ++++++++++++ src/generated/yandex/cloud/iot/index.ts | 3 + src/generated/yandex/cloud/k8s/v1/cluster.ts | 34 + .../yandex/cloud/k8s/v1/cluster_service.ts | 53 +- src/generated/yandex/cloud/k8s/v1/node.ts | 291 ++ src/generated/yandex/cloud/k8s/v1/version.ts | 2 +- .../yandex/cloud/logging/v1/log_entry.ts | 45 + .../yandex/cloud/logging/v1/log_group.ts | 1 + .../cloud/logging/v1/log_reading_service.ts | 21 + .../cloud/mdb/clickhouse/v1/backup_service.ts | 2 +- .../yandex/cloud/mdb/clickhouse/v1/cluster.ts | 87 +- .../mdb/clickhouse/v1/cluster_service.ts | 292 +- .../mdb/clickhouse/v1/config/clickhouse.ts | 26 +- .../mdb/clickhouse/v1/database_service.ts | 2 +- .../cloud/mdb/clickhouse/v1/ml_model.ts | 2 +- .../mdb/clickhouse/v1/ml_model_service.ts | 6 +- .../clickhouse/v1/resource_preset_service.ts | 2 +- .../yandex/cloud/mdb/clickhouse/v1/user.ts | 70 +- .../cloud/mdb/clickhouse/v1/user_service.ts | 2 +- .../cloud/mdb/elasticsearch/v1/cluster.ts | 2 +- .../mdb/elasticsearch/v1/cluster_service.ts | 14 +- .../elasticsearch/v1/config/elasticsearch.ts | 41 + .../cloud/mdb/elasticsearch/v1/extension.ts | 12 +- .../mdb/elasticsearch/v1/extension_service.ts | 57 +- .../v1/resource_preset_service.ts | 2 +- .../mdb/elasticsearch/v1/user_service.ts | 2 +- .../yandex/cloud/mdb/greenplum/v1/backup.ts | 9 +- .../cloud/mdb/greenplum/v1/backup_service.ts | 29 +- .../yandex/cloud/mdb/greenplum/v1/cluster.ts | 63 +- .../cloud/mdb/greenplum/v1/cluster_service.ts | 528 +- .../yandex/cloud/mdb/greenplum/v1/config.ts | 257 +- .../yandex/cloud/mdb/greenplum/v1/host.ts | 13 +- .../cloud/mdb/greenplum/v1/maintenance.ts | 11 +- .../cloud/mdb/greenplum/v1/resource_preset.ts | 65 +- .../greenplum/v1/resource_preset_service.ts | 62 +- src/generated/yandex/cloud/mdb/index.ts | 11 +- .../yandex/cloud/mdb/kafka/v1/cluster.ts | 844 ++++ .../cloud/mdb/kafka/v1/cluster_service.ts | 8 +- .../yandex/cloud/mdb/kafka/v1/connector.ts | 1380 +++++- .../cloud/mdb/kafka/v1/connector_service.ts | 163 +- .../mdb/kafka/v1/resource_preset_service.ts | 2 +- .../yandex/cloud/mdb/kafka/v1/topic.ts | 455 ++ .../cloud/mdb/kafka/v1/topic_service.ts | 2 +- .../yandex/cloud/mdb/kafka/v1/user_service.ts | 2 +- .../yandex/cloud/mdb/mongodb/v1/backup.ts | 2 +- .../cloud/mdb/mongodb/v1/backup_service.ts | 7 +- .../yandex/cloud/mdb/mongodb/v1/cluster.ts | 1594 +++++- .../cloud/mdb/mongodb/v1/cluster_service.ts | 1713 ++++++- .../cloud/mdb/mongodb/v1/config/mongodb4_2.ts | 7 + .../cloud/mdb/mongodb/v1/config/mongodb4_4.ts | 7 + .../v1/config/mongodb4_4_enterprise.ts | 7 + .../cloud/mdb/mongodb/v1/config/mongodb5_0.ts | 7 + .../v1/config/mongodb5_0_enterprise.ts | 7 + .../cloud/mdb/mongodb/v1/config/mongodb6_0.ts | 
2215 +++++++++ .../v1/config/mongodb6_0_enterprise.ts | 2934 +++++++++++ .../cloud/mdb/mongodb/v1/database_service.ts | 2 +- .../mdb/mongodb/v1/resource_preset_service.ts | 3 +- .../cloud/mdb/mongodb/v1/user_service.ts | 7 +- .../yandex/cloud/mdb/mysql/v1/cluster.ts | 34 +- .../cloud/mdb/mysql/v1/cluster_service.ts | 26 + .../cloud/mdb/mysql/v1/config/mysql5_7.ts | 104 + .../cloud/mdb/mysql/v1/config/mysql8_0.ts | 104 + .../cloud/mdb/mysql/v1alpha/backup_service.ts | 2 +- .../yandex/cloud/mdb/mysql/v1alpha/cluster.ts | 2 +- .../mdb/mysql/v1alpha/cluster_service.ts | 10 +- .../mdb/mysql/v1alpha/database_service.ts | 2 +- .../mysql/v1alpha/resource_preset_service.ts | 2 +- .../cloud/mdb/mysql/v1alpha/user_service.ts | 2 +- .../cloud/mdb/postgresql/v1/backup_service.ts | 2 +- .../yandex/cloud/mdb/postgresql/v1/cluster.ts | 126 +- .../mdb/postgresql/v1/cluster_service.ts | 128 +- .../cloud/mdb/postgresql/v1/config/host11.ts | 288 +- .../mdb/postgresql/v1/config/host11_1c.ts | 288 +- .../cloud/mdb/postgresql/v1/config/host12.ts | 288 +- .../mdb/postgresql/v1/config/host12_1c.ts | 288 +- .../cloud/mdb/postgresql/v1/config/host13.ts | 288 +- .../mdb/postgresql/v1/config/host13_1c.ts | 2062 ++++++++ .../cloud/mdb/postgresql/v1/config/host14.ts | 288 +- .../mdb/postgresql/v1/config/host14_1c.ts | 2037 ++++++++ .../mdb/postgresql/v1/config/postgresql10.ts | 201 +- .../postgresql/v1/config/postgresql10_1c.ts | 201 +- .../mdb/postgresql/v1/config/postgresql11.ts | 687 +-- .../postgresql/v1/config/postgresql11_1c.ts | 741 +-- .../mdb/postgresql/v1/config/postgresql12.ts | 757 +-- .../postgresql/v1/config/postgresql12_1c.ts | 805 +-- .../mdb/postgresql/v1/config/postgresql13.ts | 757 +-- .../postgresql/v1/config/postgresql13_1c.ts | 4258 ++++++++++++++++ .../mdb/postgresql/v1/config/postgresql14.ts | 757 +-- .../postgresql/v1/config/postgresql14_1c.ts | 4379 +++++++++++++++++ .../cloud/mdb/postgresql/v1/database.ts | 30 + .../mdb/postgresql/v1/database_service.ts | 23 +- .../postgresql/v1/resource_preset_service.ts | 2 +- .../cloud/mdb/postgresql/v1/user_service.ts | 2 +- .../cloud/mdb/redis/v1/backup_service.ts | 2 +- .../yandex/cloud/mdb/redis/v1/cluster.ts | 29 +- .../cloud/mdb/redis/v1/cluster_service.ts | 52 +- .../cloud/mdb/redis/v1/config/redis7_0.ts | 647 +++ .../mdb/redis/v1/resource_preset_service.ts | 2 +- .../yandex/cloud/mdb/sqlserver/v1/backup.ts | 5 +- .../cloud/mdb/sqlserver/v1/backup_service.ts | 20 +- .../yandex/cloud/mdb/sqlserver/v1/cluster.ts | 257 +- .../cloud/mdb/sqlserver/v1/cluster_service.ts | 582 ++- .../sqlserver/v1/config/sqlserver2016sp2.ts | 21 +- .../mdb/sqlserver/v1/config/sqlserver2017.ts | 761 +++ .../mdb/sqlserver/v1/config/sqlserver2019.ts | 761 +++ .../yandex/cloud/mdb/sqlserver/v1/database.ts | 3 +- .../mdb/sqlserver/v1/database_service.ts | 91 +- .../sqlserver/v1/resource_preset_service.ts | 20 +- .../yandex/cloud/mdb/sqlserver/v1/user.ts | 105 +- .../cloud/mdb/sqlserver/v1/user_service.ts | 52 +- .../yandex/cloud/monitoring/index.ts | 9 + .../cloud/monitoring/v3/chart_widget.ts | 2471 ++++++++++ .../yandex/cloud/monitoring/v3/dashboard.ts | 442 ++ .../cloud/monitoring/v3/dashboard_service.ts | 1752 +++++++ .../cloud/monitoring/v3/downsampling.ts | 316 ++ .../cloud/monitoring/v3/parametrization.ts | 967 ++++ .../yandex/cloud/monitoring/v3/text_widget.ts | 107 + .../cloud/monitoring/v3/title_widget.ts | 182 + .../yandex/cloud/monitoring/v3/unit_format.ts | 666 +++ .../yandex/cloud/monitoring/v3/widget.ts | 302 ++ .../cloud/operation/operation_service.ts | 6 +- 
.../yandex/cloud/organizationmanager/index.ts | 2 + .../cloud/organizationmanager/v1/group.ts | 193 + .../organizationmanager/v1/group_service.ts | 2155 ++++++++ .../organizationmanager/v1/user_service.ts | 2 +- .../apigateway/v1/apigateway_service.ts | 52 +- .../apigateway/websocket/v1/connection.ts | 284 ++ .../websocket/v1/connection_service.ts | 631 +++ .../serverless/containers/v1/container.ts | 93 + .../containers/v1/container_service.ts | 26 + .../functions/v1/function_service.ts | 12 +- .../yandex/cloud/serverless/index.ts | 4 +- .../cloud/serverless/triggers/v1/trigger.ts | 334 +- src/generated/yandex/cloud/service_clients.ts | 6 + .../yandex/cloud/storage/v1/bucket.ts | 42 +- .../yandex/cloud/storage/v1/bucket_service.ts | 14 +- .../yandex/cloud/vpc/v1/route_table.ts | 14 + src/generated/yandex/cloud/ydb/v1/database.ts | 32 + .../yandex/cloud/ydb/v1/database_service.ts | 229 + src/service-endpoints.ts | 24 +- 173 files changed, 53786 insertions(+), 4375 deletions(-) create mode 100644 src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts create mode 100644 src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts create mode 100644 src/generated/yandex/cloud/iot/broker/v1/broker.ts create mode 100644 src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts create mode 100644 src/generated/yandex/cloud/iot/broker/v1/broker_service.ts create mode 100644 src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0.ts create mode 100644 src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0_enterprise.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host13_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/host14_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts create mode 100644 src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts create mode 100644 src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2017.ts create mode 100644 src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2019.ts create mode 100644 src/generated/yandex/cloud/monitoring/index.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/chart_widget.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/dashboard.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/dashboard_service.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/downsampling.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/parametrization.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/text_widget.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/title_widget.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/unit_format.ts create mode 100644 src/generated/yandex/cloud/monitoring/v3/widget.ts create mode 100644 src/generated/yandex/cloud/organizationmanager/v1/group.ts create mode 100644 src/generated/yandex/cloud/organizationmanager/v1/group_service.ts create mode 100644 src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection.ts create mode 100644 src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection_service.ts diff --git a/cloudapi b/cloudapi index 532f96d2..a321f934 160000 --- a/cloudapi +++ b/cloudapi @@ -1 +1 @@ -Subproject commit 532f96d2834997d04cf0e50a7e65d99a88a85a89 +Subproject commit a321f9341d37fcbc849a4b38f7e659724a051ff1 diff --git a/scripts/services.ts b/scripts/services.ts 
index 8240dd51..eea8f8f6 100644 --- a/scripts/services.ts +++ b/scripts/services.ts @@ -48,6 +48,7 @@ export const servicesConfig: ServicesConfig = { snapshot_service: { importClassName: 'SnapshotServiceClient' }, zone_service: { importClassName: 'ZoneServiceClient' }, instance_group_service: { importClassName: 'InstanceGroupServiceClient' }, + snapshot_schedule_service: { importClassName: 'SnapshotScheduleServiceClient' }, }, containerregistry: { image_service: { importClassName: 'ImageServiceClient', exportClassName: 'CrImageServiceClient' }, @@ -90,6 +91,8 @@ export const servicesConfig: ServicesConfig = { access_key_service: { importClassName: 'AccessKeyServiceClient' }, }, iot: { + broker_broker_data_service: { importClassName: 'BrokerDataServiceClient' }, + broker_service: { importClassName: 'BrokerServiceClient' }, devices_device_data_service: { importClassName: 'DeviceDataServiceClient' }, devices_device_service: { importClassName: 'DeviceServiceClient' }, devices_registry_data_service: { importClassName: 'RegistryDataServiceClient' }, @@ -167,10 +170,14 @@ export const servicesConfig: ServicesConfig = { sqlserver_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'SqlServerResourcePresetServiceClient' }, sqlserver_user_service: { importClassName: 'UserServiceClient', exportClassName: 'SqlServerUserServiceClient' }, }, + monitoring: { + dashboard_service: { importClassName: 'DashboardServiceClient' }, + }, operation: { operation_service: { importClassName: 'OperationServiceClient' }, }, organizationmanager: { + group_service: { importClassName: 'GroupServiceClient' }, organization_service: { importClassName: 'OrganizationServiceClient' }, user_service: { importClassName: 'UserServiceClient' }, certificate_service: { importClassName: 'CertificateServiceClient', exportClassName: 'OmCertificateServiceClient' }, @@ -186,6 +193,7 @@ export const servicesConfig: ServicesConfig = { functions_function_service: { importClassName: 'FunctionServiceClient' }, mdbproxy_proxy_service: { importClassName: 'ProxyServiceClient' }, triggers_trigger_service: { importClassName: 'TriggerServiceClient' }, + apigateway_connection_service: { importClassName: 'ConnectionServiceClient', exportClassName: 'WebSocketConnectionServiceClient' }, }, storage: { bucket_service: { importClassName: 'BucketServiceClient' }, diff --git a/src/generated/yandex/cloud/access/access.ts b/src/generated/yandex/cloud/access/access.ts index 35939c1c..2401f24f 100644 --- a/src/generated/yandex/cloud/access/access.ts +++ b/src/generated/yandex/cloud/access/access.ts @@ -164,6 +164,12 @@ export interface AccessBindingDelta { accessBinding?: AccessBinding; } +export interface AccessBindingsOperationResult { + $type: "yandex.cloud.access.AccessBindingsOperationResult"; + /** Result access binding deltas. 
*/ + effectiveDeltas: AccessBindingDelta[]; +} + const baseSubject: object = { $type: "yandex.cloud.access.Subject", id: "", @@ -936,6 +942,89 @@ export const AccessBindingDelta = { messageTypeRegistry.set(AccessBindingDelta.$type, AccessBindingDelta); +const baseAccessBindingsOperationResult: object = { + $type: "yandex.cloud.access.AccessBindingsOperationResult", +}; + +export const AccessBindingsOperationResult = { + $type: "yandex.cloud.access.AccessBindingsOperationResult" as const, + + encode( + message: AccessBindingsOperationResult, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.effectiveDeltas) { + AccessBindingDelta.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AccessBindingsOperationResult { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAccessBindingsOperationResult, + } as AccessBindingsOperationResult; + message.effectiveDeltas = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveDeltas.push( + AccessBindingDelta.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AccessBindingsOperationResult { + const message = { + ...baseAccessBindingsOperationResult, + } as AccessBindingsOperationResult; + message.effectiveDeltas = (object.effectiveDeltas ?? []).map((e: any) => + AccessBindingDelta.fromJSON(e) + ); + return message; + }, + + toJSON(message: AccessBindingsOperationResult): unknown { + const obj: any = {}; + if (message.effectiveDeltas) { + obj.effectiveDeltas = message.effectiveDeltas.map((e) => + e ? AccessBindingDelta.toJSON(e) : undefined + ); + } else { + obj.effectiveDeltas = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): AccessBindingsOperationResult { + const message = { + ...baseAccessBindingsOperationResult, + } as AccessBindingsOperationResult; + message.effectiveDeltas = + object.effectiveDeltas?.map((e) => AccessBindingDelta.fromPartial(e)) || + []; + return message; + }, +}; + +messageTypeRegistry.set( + AccessBindingsOperationResult.$type, + AccessBindingsOperationResult +); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt.ts b/src/generated/yandex/cloud/ai/stt/v3/stt.ts index c941aa89..40f4897a 100644 --- a/src/generated/yandex/cloud/ai/stt/v3/stt.ts +++ b/src/generated/yandex/cloud/ai/stt/v3/stt.ts @@ -9,9 +9,9 @@ export enum CodeType { CODE_TYPE_UNSPECIFIED = 0, /** WORKING - all good */ WORKING = 1, - /** WARNING - for example, if speech is sent not in real time. or unknown context (and we've made fallback) */ + /** WARNING - for example, if speech is sent not in real time. or unknown context (and we've made fallback). */ WARNING = 2, - /** CLOSED - after session was closed */ + /** CLOSED - after session was closed. */ CLOSED = 3, UNRECOGNIZED = -1, } @@ -56,9 +56,9 @@ export function codeTypeToJSON(object: CodeType): string { export interface TextNormalizationOptions { $type: "speechkit.stt.v3.TextNormalizationOptions"; textNormalization: TextNormalizationOptions_TextNormalization; - /** Filter profanity (default: false) */ + /** Filter profanity (default: false). 
*/ profanityFilter: boolean; - /** Rewrite text in literature style (default: false) */ + /** Rewrite text in literature style (default: false). */ literatureText: boolean; } @@ -109,9 +109,9 @@ export function textNormalizationOptions_TextNormalizationToJSON( export interface DefaultEouClassifier { $type: "speechkit.stt.v3.DefaultEouClassifier"; - /** EOU sensitivity. Currently two levels, faster with more error and more conservative (our default) */ + /** EOU sensitivity. Currently two levels, faster with more error and more conservative (our default). */ type: DefaultEouClassifier_EouSensitivity; - /** hint for max pause between words. Our EoU detector could use this information to distinguish between end of utterance and slow speech (like one two three, etc) */ + /** Hint for max pause between words. Our EoU detector could use this information to distinguish between end of utterance and slow speech (like one two three, etc). */ maxPauseBetweenWordsHintMs: number; } @@ -157,32 +157,33 @@ export function defaultEouClassifier_EouSensitivityToJSON( } } -/** use EOU provided by user */ +/** Use EOU provided by user */ export interface ExternalEouClassifier { $type: "speechkit.stt.v3.ExternalEouClassifier"; } export interface EouClassifierOptions { $type: "speechkit.stt.v3.EouClassifierOptions"; - /** EOU classifier provided by SpeechKit. Default */ + /** EOU classifier provided by SpeechKit. Default. */ defaultClassifier?: DefaultEouClassifier | undefined; - /** EoU is enforced by external messages from user */ + /** EoU is enforced by external messages from user. */ externalClassifier?: ExternalEouClassifier | undefined; } -/** RAW Audio format spec (no container to infer type). used in AudioFormat options */ +/** RAW Audio format spec (no container to infer type). Used in AudioFormat options. */ export interface RawAudio { $type: "speechkit.stt.v3.RawAudio"; - /** type of audio encoding */ + /** Type of audio encoding */ audioEncoding: RawAudio_AudioEncoding; /** PCM sample rate */ sampleRateHertz: number; - /** PCM channel count. Currently only single channel audio is supported in real-time recognition */ + /** PCM channel count. Currently only single channel audio is supported in real-time recognition. */ audioChannelCount: number; } export enum RawAudio_AudioEncoding { AUDIO_ENCODING_UNSPECIFIED = 0, + /** LINEAR16_PCM - Audio bit depth 16-bit signed little-endian (Linear PCM). */ LINEAR16_PCM = 1, UNRECOGNIZED = -1, } @@ -217,17 +218,20 @@ export function rawAudio_AudioEncodingToJSON( } } -/** Audio with fixed type in container. used in AudioFormat options */ +/** Audio with fixed type in container. Used in AudioFormat options. */ export interface ContainerAudio { $type: "speechkit.stt.v3.ContainerAudio"; - /** type of audio container */ + /** Type of audio container. */ containerAudioType: ContainerAudio_ContainerAudioType; } export enum ContainerAudio_ContainerAudioType { CONTAINER_AUDIO_TYPE_UNSPECIFIED = 0, + /** WAV - Audio bit depth 16-bit signed little-endian (Linear PCM). */ WAV = 1, + /** OGG_OPUS - Data is encoded using the OPUS audio codec and compressed using the OGG container format. */ OGG_OPUS = 2, + /** MP3 - Data is encoded using MPEG-1/2 Layer III and compressed using the MP3 container format. */ MP3 = 3, UNRECOGNIZED = -1, } @@ -272,12 +276,12 @@ export function containerAudio_ContainerAudioTypeToJSON( } } -/** audio format options */ +/** Audio format options. 
*/ export interface AudioFormatOptions { $type: "speechkit.stt.v3.AudioFormatOptions"; - /** audio without container */ + /** Audio without container. */ rawAudio?: RawAudio | undefined; - /** audio is wrapped in container */ + /** Audio is wrapped in container. */ containerAudio?: ContainerAudio | undefined; } @@ -331,15 +335,15 @@ export function languageRestrictionOptions_LanguageRestrictionTypeToJSON( export interface RecognitionModelOptions { $type: "speechkit.stt.v3.RecognitionModelOptions"; - /** reserved for future, do not use */ + /** Reserved for future, do not use. */ model: string; - /** config for input audio */ + /** Specified input audio. */ audioFormat?: AudioFormatOptions; - /** text normalization options */ + /** Text normalization options. */ textNormalization?: TextNormalizationOptions; - /** possible languages in audio */ + /** Possible languages in audio. */ languageRestriction?: LanguageRestrictionOptions; - /** how to deal with audio data (in real time, after all data is received, etc). Default is REAL_TIME */ + /** How to deal with audio data (in real time, after all data is received, etc). Default is REAL_TIME. */ audioProcessingType: RecognitionModelOptions_AudioProcessingType; } @@ -387,176 +391,176 @@ export function recognitionModelOptions_AudioProcessingTypeToJSON( export interface StreamingOptions { $type: "speechkit.stt.v3.StreamingOptions"; - /** configuration for speech recognition model */ + /** Configuration for speech recognition model. */ recognitionModel?: RecognitionModelOptions; - /** configuration for end of utterance detection model */ + /** Configuration for end of utterance detection model. */ eouClassifier?: EouClassifierOptions; } -/** data chunk with audio */ +/** Data chunk with audio. */ export interface AudioChunk { $type: "speechkit.stt.v3.AudioChunk"; - /** bytes with audio data */ + /** Bytes with audio data. */ data: Buffer; } export interface SilenceChunk { $type: "speechkit.stt.v3.SilenceChunk"; - /** duration of silence chunk in ms */ + /** Duration of silence chunk in ms. */ durationMs: number; } -/** force EOU */ +/** Force EOU */ export interface Eou { $type: "speechkit.stt.v3.Eou"; } /** - * streaming audio request - * Events are control messages from user - * first message should be session options - * the next messages are audio data chunks or control messages + * Streaming audio request + * Events are control messages from user. + * First message should be session options. + * The next messages are audio data chunks or control messages. */ export interface StreamingRequest { $type: "speechkit.stt.v3.StreamingRequest"; /** Session options. should be first message from user */ sessionOptions?: StreamingOptions | undefined; - /** chunk with audio data */ + /** Chunk with audio data. */ chunk?: AudioChunk | undefined; - /** chunk with silence */ + /** Chunk with silence. */ silenceChunk?: SilenceChunk | undefined; - /** request to end current utterance. Works only with external EoU detector */ + /** Request to end current utterance. Works only with external EoU detector. */ eou?: Eou | undefined; } -/** recognized word */ +/** Recognized word. */ export interface Word { $type: "speechkit.stt.v3.Word"; - /** word text */ + /** Word text. 
*/ text: string; - /** estimation of word start time in ms */ + /** Estimation of word start time in ms */ startTimeMs: number; - /** estimation of word end time in ms */ + /** Estimation of word end time in ms */ endTimeMs: number; } -/** recognition of specific time frame */ +/** Recognition of specific time frame. */ export interface Alternative { $type: "speechkit.stt.v3.Alternative"; - /** words in time frame */ + /** Words in time frame. */ words: Word[]; - /** text in time frame */ + /** Text in time frame. */ text: string; - /** start of time frame */ + /** Start of time frame. */ startTimeMs: number; - /** end of time frame */ + /** End of time frame. */ endTimeMs: number; - /** hypothesis confidence. Currently is not used */ + /** Hypothesis confidence. Currently is not used. */ confidence: number; } /** Update information from */ export interface EouUpdate { $type: "speechkit.stt.v3.EouUpdate"; - /** end of utterance estimated time */ + /** End of utterance estimated time. */ timeMs: number; } -/** update of hypothesis */ +/** Update of hypothesis. */ export interface AlternativeUpdate { $type: "speechkit.stt.v3.AlternativeUpdate"; - /** list of hypothesis for timeframes */ + /** List of hypothesis for timeframes. */ alternatives: Alternative[]; - /** tag for distinguish audio channels. */ + /** Tag for distinguish audio channels. */ channelTag: string; } -/** AudioCursors are state of ASR recognition stream */ +/** AudioCursors are state of ASR recognition stream. */ export interface AudioCursors { $type: "speechkit.stt.v3.AudioCursors"; - /** amount of audio chunks server received. This cursor is moved after each audio chunk was received by server. */ + /** Amount of audio chunks server received. This cursor is moved after each audio chunk was received by server. */ receivedDataMs: number; - /** input stream reset data */ + /** Input stream reset data. */ resetTimeMs: number; /** - * how much audio was processed. This time includes trimming silences as well. This cursor is moved after server received enough data - * to update recognition results (includes silence as well) + * How much audio was processed. This time includes trimming silences as well. This cursor is moved after server received enough data + * to update recognition results (includes silence as well). */ partialTimeMs: number; /** * Time of last final. This cursor is moved when server decides that recognition from start of audio until final_time_ms will not change anymore - * usually this even is followed by EOU detection (but this could change in future) + * usually this even is followed by EOU detection (but this could change in future). */ finalTimeMs: number; /** This is index of last final server send. Incremented after each new final. */ finalIndex: number; /** - * Estimated time of EOU. Cursor is updated after each new EOU is sent - * For external classifier this equals to received_data_ms at the moment EOU event arrives - * For internal classifier this is estimation of time. The time is not exact and has the same guarantees as word timings + * Estimated time of EOU. Cursor is updated after each new EOU is sent. + * For external classifier this equals to received_data_ms at the moment EOU event arrives. + * For internal classifier this is estimation of time. The time is not exact and has the same guarantees as word timings. */ eouTimeMs: number; } -/** refinement for final hypo. For example, text normalization is refinement. */ +/** Refinement for final hypo. For example, text normalization is refinement. 
*/ export interface FinalRefinement { $type: "speechkit.stt.v3.FinalRefinement"; - /** index of final for which server sends additional information */ + /** Index of final for which server sends additional information. */ finalIndex: number; - /** normalized text instead of raw one */ + /** Normalized text instead of raw one. */ normalizedText?: AlternativeUpdate | undefined; } -/** status message */ +/** Status message */ export interface StatusCode { $type: "speechkit.stt.v3.StatusCode"; - /** code type */ + /** Code type. */ codeType: CodeType; - /** human readable message */ + /** Human readable message. */ message: string; } -/** session identifier */ +/** Session identifier. */ export interface SessionUuid { $type: "speechkit.stt.v3.SessionUuid"; - /** internal session identifier */ + /** Internal session identifier. */ uuid: string; - /** user session identifier */ + /** User session identifier. */ userRequestId: string; } /** - * responses from server - * each response contains session uuid + * Responses from server. + * Each response contains session uuid * AudioCursors - * plus specific even + * plus specific event */ export interface StreamingResponse { $type: "speechkit.stt.v3.StreamingResponse"; - /** session identifier */ + /** Session identifier */ sessionUuid?: SessionUuid; - /** progress bar for stream session recognition: how many data we obtained; final and partial times; etc */ + /** Progress bar for stream session recognition: how many data we obtained; final and partial times; etc. */ audioCursors?: AudioCursors; - /** wall clock on server side. This is time when server wrote results to stream */ + /** Wall clock on server side. This is time when server wrote results to stream */ responseWallTimeMs: number; /** - * partial results, server will send them regularly after enough audio data was received from user. This are current text estimation - * from final_time_ms to partial_time_ms. Could change after new data will arrive + * Partial results, server will send them regularly after enough audio data was received from user. This are current text estimation + * from final_time_ms to partial_time_ms. Could change after new data will arrive. */ partial?: AlternativeUpdate | undefined; - /** final results, the recognition is now fixed until final_time_ms. For now, final is sent only if the EOU event was triggered. This could be change in future releases */ + /** Final results, the recognition is now fixed until final_time_ms. For now, final is sent only if the EOU event was triggered. This could be change in future releases. */ final?: AlternativeUpdate | undefined; /** * After EOU classifier, send the message with final, send the EouUpdate with time of EOU - * before eou_update we send final with the same time. there could be several finals before eou update + * before eou_update we send final with the same time. there could be several finals before eou update. */ eouUpdate?: EouUpdate | undefined; /** * For each final, if normalization is enabled, sent the normalized text (or some other advanced post-processing). - * Final normalization will introduce additional latency + * Final normalization will introduce additional latency. */ finalRefinement?: FinalRefinement | undefined; - /** Status messages, send by server with fixed interval (keep-alive) */ + /** Status messages, send by server with fixed interval (keep-alive). 
*/ statusCode?: StatusCode | undefined; } diff --git a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts index 2e228843..9be4c6be 100644 --- a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts +++ b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts @@ -953,7 +953,7 @@ export const ListLanguagesResponse = { messageTypeRegistry.set(ListLanguagesResponse.$type, ListLanguagesResponse); -/** A set of methods for the Yandex Translate service. */ +/** A set of methods for the Translate service. */ export const TranslationServiceService = { /** Translates the text to the specified language. */ translate: { diff --git a/src/generated/yandex/cloud/ai/tts/v3/tts.ts b/src/generated/yandex/cloud/ai/tts/v3/tts.ts index 2e03f1f5..b671fde6 100644 --- a/src/generated/yandex/cloud/ai/tts/v3/tts.ts +++ b/src/generated/yandex/cloud/ai/tts/v3/tts.ts @@ -75,7 +75,9 @@ export enum ContainerAudio_ContainerAudioType { CONTAINER_AUDIO_TYPE_UNSPECIFIED = 0, /** WAV - Audio bit depth 16-bit signed little-endian (Linear PCM). */ WAV = 1, + /** OGG_OPUS - Data is encoded using the OPUS audio codec and compressed using the OGG container format. */ OGG_OPUS = 2, + /** MP3 - Data is encoded using MPEG-1/2 Layer III and compressed using the MP3 container format. */ MP3 = 3, UNRECOGNIZED = -1, } @@ -182,10 +184,15 @@ export interface Hints { voice: string | undefined; /** Template for synthesizing. */ audioTemplate?: AudioTemplate | undefined; - /** hint to change speed */ + /** Hint to change speed. */ speed: number | undefined; - /** hint to regulate volume. For LOUDNESS_NORMALIZATION_TYPE_UNSPECIFIED normalization will use MAX_PEAK, if volume in (0, 1], LUFS if volume in [-145, 0). */ + /** + * Hint to regulate normalization level. + * * For `MAX_PEAK` loudness_normalization_type: volume changes in a range (0;1], default value is 0.7. + * * For `LUFS` loudness_normalization_type: volume changes in a range [-145;0), default value is -19. + */ volume: number | undefined; + /** Hint to specify pronunciation character for the speaker. */ role: string | undefined; } @@ -193,7 +200,7 @@ export interface UtteranceSynthesisRequest { $type: "speechkit.tts.v3.UtteranceSynthesisRequest"; /** * The name of the model. - * Specifies basic synthesis functionality. Currently should be empty. Do not use it + * Specifies basic synthesis functionality. Currently should be empty. Do not use it. */ model: string; /** Raw text (e.g. "Hello, Alice"). */ @@ -204,16 +211,20 @@ export interface UtteranceSynthesisRequest { hints: Hints[]; /** Optional. Default: 22050 Hz, linear 16-bit signed little-endian PCM, with WAV header */ outputAudioSpec?: AudioFormatOptions; - /** Optional. Default: LUFS, type of loudness normalization, default value -19. */ + /** + * Specifies type of loudness normalization. + * Optional. Default: `LUFS`. + */ loudnessNormalizationType: UtteranceSynthesisRequest_LoudnessNormalizationType; /** Optional. Automatically split long text to several utterances and bill accordingly. Some degradation in service quality is possible. */ unsafeMode: boolean; } -/** Normalization type */ export enum UtteranceSynthesisRequest_LoudnessNormalizationType { LOUDNESS_NORMALIZATION_TYPE_UNSPECIFIED = 0, + /** MAX_PEAK - The type of normalization, wherein the gain is changed to bring the highest PCM sample value or analog signal peak to a given level. 
*/ MAX_PEAK = 1, + /** LUFS - The type of normalization based on EBU R 128 recommendation. */ LUFS = 2, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts b/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts index 17a1b3f3..25ccaf67 100644 --- a/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts +++ b/src/generated/yandex/cloud/ai/vision/v1/vision_service.ts @@ -964,7 +964,7 @@ export const FeatureResult = { messageTypeRegistry.set(FeatureResult.$type, FeatureResult); -/** A set of methods for the Yandex Vision service. */ +/** A set of methods for the Vision service. */ export const VisionServiceService = { /** Analyzes a batch of images and returns results with annotations. */ batchAnalyze: { diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts index 7811394d..31b86332 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts @@ -28,11 +28,7 @@ export interface LoadBalancer { labels: { [key: string]: string }; /** Status of the application load balancer. */ status: LoadBalancer_Status; - /** - * ID of the region that the application load balancer is located at. - * - * Currently Yandex Cloud supports only `ru-central1` region. - */ + /** ID of the region that the application load balancer is located at. */ regionId: string; /** ID of the network that the application load balancer belongs to. */ networkId: string; @@ -187,7 +183,7 @@ export interface Location { /** * ID of the availability zone where the application load balancer resides. * - * Each Yandex Cloud availability zone can only be specified once. + * Each availability zone can only be specified once. */ zoneId: string; /** ID of the subnet that the application load balancer belongs to. */ diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts index 685be7ae..9be8db55 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts @@ -156,6 +156,8 @@ export interface StringMatch { exactMatch: string | undefined; /** Prefix match string. */ prefixMatch: string | undefined; + /** Regular expression match string. */ + regexMatch: string | undefined; } /** A redirect action resource. */ @@ -1312,6 +1314,9 @@ export const StringMatch = { if (message.prefixMatch !== undefined) { writer.uint32(18).string(message.prefixMatch); } + if (message.regexMatch !== undefined) { + writer.uint32(26).string(message.regexMatch); + } return writer; }, @@ -1328,6 +1333,9 @@ export const StringMatch = { case 2: message.prefixMatch = reader.string(); break; + case 3: + message.regexMatch = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1346,6 +1354,10 @@ export const StringMatch = { object.prefixMatch !== undefined && object.prefixMatch !== null ? String(object.prefixMatch) : undefined; + message.regexMatch = + object.regexMatch !== undefined && object.regexMatch !== null + ? 
String(object.regexMatch) + : undefined; return message; }, @@ -1354,6 +1366,7 @@ export const StringMatch = { message.exactMatch !== undefined && (obj.exactMatch = message.exactMatch); message.prefixMatch !== undefined && (obj.prefixMatch = message.prefixMatch); + message.regexMatch !== undefined && (obj.regexMatch = message.regexMatch); return obj; }, @@ -1363,6 +1376,7 @@ export const StringMatch = { const message = { ...baseStringMatch } as StringMatch; message.exactMatch = object.exactMatch ?? undefined; message.prefixMatch = object.prefixMatch ?? undefined; + message.regexMatch = object.regexMatch ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/billing/v1/customer_service.ts b/src/generated/yandex/cloud/billing/v1/customer_service.ts index 94fa592b..7f6bde6f 100644 --- a/src/generated/yandex/cloud/billing/v1/customer_service.ts +++ b/src/generated/yandex/cloud/billing/v1/customer_service.ts @@ -68,6 +68,16 @@ export interface InviteCustomerRequest { person?: CustomerPerson; } +export interface CreateResellerServedCustomerRequest { + $type: "yandex.cloud.billing.v1.CreateResellerServedCustomerRequest"; + /** ID of the reseller that customer will be associated with. */ + resellerId: string; + /** Name of the customer. */ + name: string; + /** Person of the customer. */ + person?: CustomerPerson; +} + export interface ActivateCustomerRequest { $type: "yandex.cloud.billing.v1.ActivateCustomerRequest"; /** @@ -381,6 +391,111 @@ export const InviteCustomerRequest = { messageTypeRegistry.set(InviteCustomerRequest.$type, InviteCustomerRequest); +const baseCreateResellerServedCustomerRequest: object = { + $type: "yandex.cloud.billing.v1.CreateResellerServedCustomerRequest", + resellerId: "", + name: "", +}; + +export const CreateResellerServedCustomerRequest = { + $type: "yandex.cloud.billing.v1.CreateResellerServedCustomerRequest" as const, + + encode( + message: CreateResellerServedCustomerRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resellerId !== "") { + writer.uint32(10).string(message.resellerId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.person !== undefined) { + CustomerPerson.encode(message.person, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateResellerServedCustomerRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateResellerServedCustomerRequest, + } as CreateResellerServedCustomerRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resellerId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.person = CustomerPerson.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateResellerServedCustomerRequest { + const message = { + ...baseCreateResellerServedCustomerRequest, + } as CreateResellerServedCustomerRequest; + message.resellerId = + object.resellerId !== undefined && object.resellerId !== null + ? String(object.resellerId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.person = + object.person !== undefined && object.person !== null + ? 
CustomerPerson.fromJSON(object.person) + : undefined; + return message; + }, + + toJSON(message: CreateResellerServedCustomerRequest): unknown { + const obj: any = {}; + message.resellerId !== undefined && (obj.resellerId = message.resellerId); + message.name !== undefined && (obj.name = message.name); + message.person !== undefined && + (obj.person = message.person + ? CustomerPerson.toJSON(message.person) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateResellerServedCustomerRequest { + const message = { + ...baseCreateResellerServedCustomerRequest, + } as CreateResellerServedCustomerRequest; + message.resellerId = object.resellerId ?? ""; + message.name = object.name ?? ""; + message.person = + object.person !== undefined && object.person !== null + ? CustomerPerson.fromPartial(object.person) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + CreateResellerServedCustomerRequest.$type, + CreateResellerServedCustomerRequest +); + const baseActivateCustomerRequest: object = { $type: "yandex.cloud.billing.v1.ActivateCustomerRequest", customerId: "", @@ -618,6 +733,19 @@ export const CustomerServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Creates new reseller-served customer. */ + createResellerServed: { + path: "/yandex.cloud.billing.v1.CustomerService/CreateResellerServed", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateResellerServedCustomerRequest) => + Buffer.from(CreateResellerServedCustomerRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateResellerServedCustomerRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Activates specified customer. After customer is activated, he can use resources associated with his billing account. */ activate: { path: "/yandex.cloud.billing.v1.CustomerService/Activate", @@ -650,6 +778,11 @@ export interface CustomerServiceServer extends UntypedServiceImplementation { list: handleUnaryCall; /** Invites customer to the specified reseller. */ invite: handleUnaryCall; + /** Creates new reseller-served customer. */ + createResellerServed: handleUnaryCall< + CreateResellerServedCustomerRequest, + Operation + >; /** Activates specified customer. After customer is activated, he can use resources associated with his billing account. */ activate: handleUnaryCall; /** Suspend specified customer. After customer is suspended, he can't use resources associated with his billing account. */ @@ -698,6 +831,22 @@ export interface CustomerServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Creates new reseller-served customer. */ + createResellerServed( + request: CreateResellerServedCustomerRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + createResellerServed( + request: CreateResellerServedCustomerRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + createResellerServed( + request: CreateResellerServedCustomerRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Activates specified customer. 
After customer is activated, he can use resources associated with his billing account. */ activate( request: ActivateCustomerRequest, diff --git a/src/generated/yandex/cloud/cdn/v1/origin.ts b/src/generated/yandex/cloud/cdn/v1/origin.ts index dcae02e2..50218e46 100644 --- a/src/generated/yandex/cloud/cdn/v1/origin.ts +++ b/src/generated/yandex/cloud/cdn/v1/origin.ts @@ -68,12 +68,12 @@ export interface OriginMeta { $type: "yandex.cloud.cdn.v1.OriginMeta"; /** A server with a domain name linked to it */ common?: OriginNamedMeta | undefined; - /** A Yandex Object Storage bucket not configured as a static site hosting. */ + /** An Object Storage bucket not configured as a static site hosting. */ bucket?: OriginNamedMeta | undefined; - /** A Yandex Object Storage bucket configured as a static site hosting. */ + /** An Object Storage bucket configured as a static site hosting. */ website?: OriginNamedMeta | undefined; /** - * An L7 load balancer from Yandex Application Load Balancer. + * An L7 load balancer from Application Load Balancer. * CDN servers will access the load balancer at one of its IP addresses that must be selected in the origin settings. */ balancer?: OriginBalancerMeta | undefined; @@ -86,7 +86,7 @@ export interface OriginNamedMeta { name: string; } -/** Yandex Application Load Balancer origin info. For details about the concept, see [documentation](/docs/cdn/concepts/origins). */ +/** Application Load Balancer origin info. For details about the concept, see [documentation](/docs/cdn/concepts/origins). */ export interface OriginBalancerMeta { $type: "yandex.cloud.cdn.v1.OriginBalancerMeta"; /** ID of the origin. */ diff --git a/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts b/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts index de14cad7..6bc93350 100644 --- a/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts +++ b/src/generated/yandex/cloud/certificatemanager/v1/certificate.ts @@ -128,6 +128,8 @@ export interface Certificate { notBefore?: Date; /** Domains validation challenges of the certificate. Used only for managed certificates. */ challenges: Challenge[]; + /** Flag that protects deletion of the certificate */ + deletionProtection: boolean; } export enum Certificate_Status { @@ -299,6 +301,17 @@ export interface Challenge_HttpFile { content: string; } +/** A certificate version */ +export interface Version { + $type: "yandex.cloud.certificatemanager.v1.Version"; + /** ID of the version. */ + id: string; + /** ID of the certificate that the version belongs to. */ + certificateId: string; + /** Time when the version was created. */ + createdAt?: Date; +} + const baseCertificate: object = { $type: "yandex.cloud.certificatemanager.v1.Certificate", id: "", @@ -311,6 +324,7 @@ const baseCertificate: object = { issuer: "", subject: "", serial: "", + deletionProtection: false, }; export const Certificate = { @@ -393,6 +407,9 @@ export const Certificate = { for (const v of message.challenges) { Challenge.encode(v!, writer.uint32(138).fork()).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(144).bool(message.deletionProtection); + } return writer; }, @@ -473,6 +490,9 @@ export const Certificate = { case 17: message.challenges.push(Challenge.decode(reader, reader.uint32())); break; + case 18: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -547,6 +567,11 @@ export const Certificate = { message.challenges = (object.challenges ?? 
[]).map((e: any) => Challenge.fromJSON(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -592,6 +617,8 @@ export const Certificate = { } else { obj.challenges = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -624,6 +651,7 @@ export const Certificate = { message.notBefore = object.notBefore ?? undefined; message.challenges = object.challenges?.map((e) => Challenge.fromPartial(e)) || []; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -1073,6 +1101,96 @@ export const Challenge_HttpFile = { messageTypeRegistry.set(Challenge_HttpFile.$type, Challenge_HttpFile); +const baseVersion: object = { + $type: "yandex.cloud.certificatemanager.v1.Version", + id: "", + certificateId: "", +}; + +export const Version = { + $type: "yandex.cloud.certificatemanager.v1.Version" as const, + + encode( + message: Version, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.certificateId !== "") { + writer.uint32(18).string(message.certificateId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Version { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseVersion } as Version; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.certificateId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Version { + const message = { ...baseVersion } as Version; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.certificateId = + object.certificateId !== undefined && object.certificateId !== null + ? String(object.certificateId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + return message; + }, + + toJSON(message: Version): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.certificateId !== undefined && + (obj.certificateId = message.certificateId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + return obj; + }, + + fromPartial, I>>(object: I): Version { + const message = { ...baseVersion } as Version; + message.id = object.id ?? ""; + message.certificateId = object.certificateId ?? ""; + message.createdAt = object.createdAt ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(Version.$type, Version); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/certificatemanager/v1/certificate_content_service.ts b/src/generated/yandex/cloud/certificatemanager/v1/certificate_content_service.ts index 169859ff..37c72acf 100644 --- a/src/generated/yandex/cloud/certificatemanager/v1/certificate_content_service.ts +++ b/src/generated/yandex/cloud/certificatemanager/v1/certificate_content_service.ts @@ -17,6 +17,44 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.certificatemanager.v1"; +export enum PrivateKeyFormat { + PRIVATE_KEY_FORMAT_UNSPECIFIED = 0, + PKCS1 = 1, + PKCS8 = 2, + UNRECOGNIZED = -1, +} + +export function privateKeyFormatFromJSON(object: any): PrivateKeyFormat { + switch (object) { + case 0: + case "PRIVATE_KEY_FORMAT_UNSPECIFIED": + return PrivateKeyFormat.PRIVATE_KEY_FORMAT_UNSPECIFIED; + case 1: + case "PKCS1": + return PrivateKeyFormat.PKCS1; + case 2: + case "PKCS8": + return PrivateKeyFormat.PKCS8; + case -1: + case "UNRECOGNIZED": + default: + return PrivateKeyFormat.UNRECOGNIZED; + } +} + +export function privateKeyFormatToJSON(object: PrivateKeyFormat): string { + switch (object) { + case PrivateKeyFormat.PRIVATE_KEY_FORMAT_UNSPECIFIED: + return "PRIVATE_KEY_FORMAT_UNSPECIFIED"; + case PrivateKeyFormat.PKCS1: + return "PKCS1"; + case PrivateKeyFormat.PKCS8: + return "PKCS8"; + default: + return "UNKNOWN"; + } +} + export interface GetCertificateContentResponse { $type: "yandex.cloud.certificatemanager.v1.GetCertificateContentResponse"; /** ID of the certificate. */ @@ -31,6 +69,10 @@ export interface GetCertificateContentRequest { $type: "yandex.cloud.certificatemanager.v1.GetCertificateContentRequest"; /** ID of the certificate to download content. */ certificateId: string; + /** Optional ID of the version. */ + versionId: string; + /** Desired format of private key */ + privateKeyFormat: PrivateKeyFormat; } const baseGetCertificateContentResponse: object = { @@ -142,6 +184,8 @@ messageTypeRegistry.set( const baseGetCertificateContentRequest: object = { $type: "yandex.cloud.certificatemanager.v1.GetCertificateContentRequest", certificateId: "", + versionId: "", + privateKeyFormat: 0, }; export const GetCertificateContentRequest = { @@ -155,6 +199,12 @@ export const GetCertificateContentRequest = { if (message.certificateId !== "") { writer.uint32(10).string(message.certificateId); } + if (message.versionId !== "") { + writer.uint32(18).string(message.versionId); + } + if (message.privateKeyFormat !== 0) { + writer.uint32(24).int32(message.privateKeyFormat); + } return writer; }, @@ -173,6 +223,12 @@ export const GetCertificateContentRequest = { case 1: message.certificateId = reader.string(); break; + case 2: + message.versionId = reader.string(); + break; + case 3: + message.privateKeyFormat = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -189,6 +245,14 @@ export const GetCertificateContentRequest = { object.certificateId !== undefined && object.certificateId !== null ? String(object.certificateId) : ""; + message.versionId = + object.versionId !== undefined && object.versionId !== null + ? String(object.versionId) + : ""; + message.privateKeyFormat = + object.privateKeyFormat !== undefined && object.privateKeyFormat !== null + ? 
privateKeyFormatFromJSON(object.privateKeyFormat) + : 0; return message; }, @@ -196,6 +260,9 @@ export const GetCertificateContentRequest = { const obj: any = {}; message.certificateId !== undefined && (obj.certificateId = message.certificateId); + message.versionId !== undefined && (obj.versionId = message.versionId); + message.privateKeyFormat !== undefined && + (obj.privateKeyFormat = privateKeyFormatToJSON(message.privateKeyFormat)); return obj; }, @@ -206,6 +273,8 @@ export const GetCertificateContentRequest = { ...baseGetCertificateContentRequest, } as GetCertificateContentRequest; message.certificateId = object.certificateId ?? ""; + message.versionId = object.versionId ?? ""; + message.privateKeyFormat = object.privateKeyFormat ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/certificatemanager/v1/certificate_service.ts b/src/generated/yandex/cloud/certificatemanager/v1/certificate_service.ts index 29645bfb..c82fa3ea 100644 --- a/src/generated/yandex/cloud/certificatemanager/v1/certificate_service.ts +++ b/src/generated/yandex/cloud/certificatemanager/v1/certificate_service.ts @@ -18,6 +18,7 @@ import { FieldMask } from "../../../../google/protobuf/field_mask"; import { ChallengeType, Certificate, + Version, challengeTypeFromJSON, challengeTypeToJSON, } from "../../../../yandex/cloud/certificatemanager/v1/certificate"; @@ -111,6 +112,36 @@ export interface ListCertificatesResponse { nextPageToken: string; } +export interface ListVersionsRequest { + $type: "yandex.cloud.certificatemanager.v1.ListVersionsRequest"; + /** ID of the certificate to list versions for. */ + certificateId: string; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListCertificatesResponse.next_page_token] returned by a previous list request. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListCertificatesResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListVersionsResponse { + $type: "yandex.cloud.certificatemanager.v1.ListVersionsResponse"; + /** List of versions for the specified certificate. */ + versions: Version[]; + /** + * This token allows you to get the next page of results for list requests. If the number + * of results is greater than the specified [ListCertificatesRequest.page_size], use + * the `next_page_token` as the value for the [ListCertificatesRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + export interface CreateCertificateRequest { $type: "yandex.cloud.certificatemanager.v1.CreateCertificateRequest"; /** ID of the folder to create a certificate in. */ @@ -130,6 +161,8 @@ export interface CreateCertificateRequest { chain: string; /** PEM-encoded private key content of the certificate. */ privateKey: string; + /** Flag that protects deletion of the certificate */ + deletionProtection: boolean; } export interface CreateCertificateRequest_LabelsEntry { @@ -165,6 +198,8 @@ export interface UpdateCertificateRequest { chain: string; /** New PEM-encoded private key content for the certificate. Used only for imported certificates. 
*/ privateKey: string; + /** Flag that protects deletion of the certificate */ + deletionProtection: boolean; } export interface UpdateCertificateRequest_LabelsEntry { @@ -205,6 +240,8 @@ export interface RequestNewCertificateRequest { domains: string[]; /** Type of the domain validation challenge. */ challengeType: ChallengeType; + /** Flag that protects deletion of the certificate */ + deletionProtection: boolean; } export interface RequestNewCertificateRequest_LabelsEntry { @@ -542,6 +579,181 @@ messageTypeRegistry.set( ListCertificatesResponse ); +const baseListVersionsRequest: object = { + $type: "yandex.cloud.certificatemanager.v1.ListVersionsRequest", + certificateId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListVersionsRequest = { + $type: "yandex.cloud.certificatemanager.v1.ListVersionsRequest" as const, + + encode( + message: ListVersionsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.certificateId !== "") { + writer.uint32(10).string(message.certificateId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListVersionsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListVersionsRequest } as ListVersionsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.certificateId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListVersionsRequest { + const message = { ...baseListVersionsRequest } as ListVersionsRequest; + message.certificateId = + object.certificateId !== undefined && object.certificateId !== null + ? String(object.certificateId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListVersionsRequest): unknown { + const obj: any = {}; + message.certificateId !== undefined && + (obj.certificateId = message.certificateId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListVersionsRequest { + const message = { ...baseListVersionsRequest } as ListVersionsRequest; + message.certificateId = object.certificateId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListVersionsRequest.$type, ListVersionsRequest); + +const baseListVersionsResponse: object = { + $type: "yandex.cloud.certificatemanager.v1.ListVersionsResponse", + nextPageToken: "", +}; + +export const ListVersionsResponse = { + $type: "yandex.cloud.certificatemanager.v1.ListVersionsResponse" as const, + + encode( + message: ListVersionsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.versions) { + Version.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListVersionsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListVersionsResponse } as ListVersionsResponse; + message.versions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.versions.push(Version.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListVersionsResponse { + const message = { ...baseListVersionsResponse } as ListVersionsResponse; + message.versions = (object.versions ?? []).map((e: any) => + Version.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListVersionsResponse): unknown { + const obj: any = {}; + if (message.versions) { + obj.versions = message.versions.map((e) => + e ? Version.toJSON(e) : undefined + ); + } else { + obj.versions = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListVersionsResponse { + const message = { ...baseListVersionsResponse } as ListVersionsResponse; + message.versions = + object.versions?.map((e) => Version.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListVersionsResponse.$type, ListVersionsResponse); + const baseCreateCertificateRequest: object = { $type: "yandex.cloud.certificatemanager.v1.CreateCertificateRequest", folderId: "", @@ -550,6 +762,7 @@ const baseCreateCertificateRequest: object = { certificate: "", chain: "", privateKey: "", + deletionProtection: false, }; export const CreateCertificateRequest = { @@ -588,6 +801,9 @@ export const CreateCertificateRequest = { if (message.privateKey !== "") { writer.uint32(58).string(message.privateKey); } + if (message.deletionProtection === true) { + writer.uint32(64).bool(message.deletionProtection); + } return writer; }, @@ -631,6 +847,9 @@ export const CreateCertificateRequest = { case 7: message.privateKey = reader.string(); break; + case 8: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -673,6 +892,11 @@ export const CreateCertificateRequest = { object.privateKey !== undefined && object.privateKey !== null ? String(object.privateKey) : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? 
Boolean(object.deletionProtection) + : false; return message; }, @@ -692,6 +916,8 @@ export const CreateCertificateRequest = { (obj.certificate = message.certificate); message.chain !== undefined && (obj.chain = message.chain); message.privateKey !== undefined && (obj.privateKey = message.privateKey); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -715,6 +941,7 @@ export const CreateCertificateRequest = { message.certificate = object.certificate ?? ""; message.chain = object.chain ?? ""; message.privateKey = object.privateKey ?? ""; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -895,6 +1122,7 @@ const baseUpdateCertificateRequest: object = { certificate: "", chain: "", privateKey: "", + deletionProtection: false, }; export const UpdateCertificateRequest = { @@ -936,6 +1164,9 @@ export const UpdateCertificateRequest = { if (message.privateKey !== "") { writer.uint32(66).string(message.privateKey); } + if (message.deletionProtection === true) { + writer.uint32(72).bool(message.deletionProtection); + } return writer; }, @@ -982,6 +1213,9 @@ export const UpdateCertificateRequest = { case 8: message.privateKey = reader.string(); break; + case 9: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1028,6 +1262,11 @@ export const UpdateCertificateRequest = { object.privateKey !== undefined && object.privateKey !== null ? String(object.privateKey) : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -1052,6 +1291,8 @@ export const UpdateCertificateRequest = { (obj.certificate = message.certificate); message.chain !== undefined && (obj.chain = message.chain); message.privateKey !== undefined && (obj.privateKey = message.privateKey); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -1079,6 +1320,7 @@ export const UpdateCertificateRequest = { message.certificate = object.certificate ?? ""; message.chain = object.chain ?? ""; message.privateKey = object.privateKey ?? ""; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -1409,6 +1651,7 @@ const baseRequestNewCertificateRequest: object = { description: "", domains: "", challengeType: 0, + deletionProtection: false, }; export const RequestNewCertificateRequest = { @@ -1445,6 +1688,9 @@ export const RequestNewCertificateRequest = { if (message.challengeType !== 0) { writer.uint32(48).int32(message.challengeType); } + if (message.deletionProtection === true) { + writer.uint32(56).bool(message.deletionProtection); + } return writer; }, @@ -1486,6 +1732,9 @@ export const RequestNewCertificateRequest = { case 6: message.challengeType = reader.int32() as any; break; + case 7: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1521,6 +1770,11 @@ export const RequestNewCertificateRequest = { object.challengeType !== undefined && object.challengeType !== null ? challengeTypeFromJSON(object.challengeType) : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? 
Boolean(object.deletionProtection) + : false; return message; }, @@ -1543,6 +1797,8 @@ export const RequestNewCertificateRequest = { } message.challengeType !== undefined && (obj.challengeType = challengeTypeToJSON(message.challengeType)); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -1565,6 +1821,7 @@ export const RequestNewCertificateRequest = { }, {}); message.domains = object.domains?.map((e) => e) || []; message.challengeType = object.challengeType ?? 0; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -1967,6 +2224,17 @@ export const CertificateServiceService = { responseDeserialize: (value: Buffer) => ListCertificatesResponse.decode(value), }, + listVersions: { + path: "/yandex.cloud.certificatemanager.v1.CertificateService/ListVersions", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListVersionsRequest) => + Buffer.from(ListVersionsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListVersionsRequest.decode(value), + responseSerialize: (value: ListVersionsResponse) => + Buffer.from(ListVersionsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListVersionsResponse.decode(value), + }, /** Creates a certificate in the specified folder. */ create: { path: "/yandex.cloud.certificatemanager.v1.CertificateService/Create", @@ -2084,6 +2352,7 @@ export interface CertificateServiceServer extends UntypedServiceImplementation { get: handleUnaryCall; /** Returns the list of certificates in the specified folder. */ list: handleUnaryCall; + listVersions: handleUnaryCall; /** Creates a certificate in the specified folder. */ create: handleUnaryCall; /** Updates the specified certificate. */ @@ -2154,6 +2423,30 @@ export interface CertificateServiceClient extends Client { response: ListCertificatesResponse ) => void ): ClientUnaryCall; + listVersions( + request: ListVersionsRequest, + callback: ( + error: ServiceError | null, + response: ListVersionsResponse + ) => void + ): ClientUnaryCall; + listVersions( + request: ListVersionsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListVersionsResponse + ) => void + ): ClientUnaryCall; + listVersions( + request: ListVersionsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListVersionsResponse + ) => void + ): ClientUnaryCall; /** Creates a certificate in the specified folder. 
*/ create( request: CreateCertificateRequest, diff --git a/src/generated/yandex/cloud/compute/index.ts b/src/generated/yandex/cloud/compute/index.ts index d9113467..b474f30b 100644 --- a/src/generated/yandex/cloud/compute/index.ts +++ b/src/generated/yandex/cloud/compute/index.ts @@ -17,6 +17,8 @@ export * as instance_service from './v1/instance_service' export * as placement_group from './v1/placement_group' export * as placement_group_service from './v1/placement_group_service' export * as snapshot from './v1/snapshot' +export * as snapshot_schedule from './v1/snapshot_schedule' +export * as snapshot_schedule_service from './v1/snapshot_schedule_service' export * as snapshot_service from './v1/snapshot_service' export * as zone from './v1/zone' export * as zone_service from './v1/zone_service' diff --git a/src/generated/yandex/cloud/compute/v1/disk.ts b/src/generated/yandex/cloud/compute/v1/disk.ts index 0905a82e..63a50352 100644 --- a/src/generated/yandex/cloud/compute/v1/disk.ts +++ b/src/generated/yandex/cloud/compute/v1/disk.ts @@ -32,10 +32,10 @@ export interface Disk { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by the platform. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. - * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + * For example, if you create a disk image using a third-party utility and load it into Object Storage, the license IDs will be lost. * You can specify them in the [yandex.cloud.compute.v1.ImageService.Create] request. */ productIds: string[]; diff --git a/src/generated/yandex/cloud/compute/v1/disk_service.ts b/src/generated/yandex/cloud/compute/v1/disk_service.ts index 7f813b4f..e27323d2 100644 --- a/src/generated/yandex/cloud/compute/v1/disk_service.ts +++ b/src/generated/yandex/cloud/compute/v1/disk_service.ts @@ -20,6 +20,7 @@ import { } from "../../../../yandex/cloud/compute/v1/disk"; import { FieldMask } from "../../../../google/protobuf/field_mask"; import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { SnapshotSchedule } from "../../../../yandex/cloud/compute/v1/snapshot_schedule"; export const protobufPackage = "yandex.cloud.compute.v1"; @@ -113,6 +114,8 @@ export interface CreateDiskRequest { blockSize: number; /** Placement policy configuration. */ diskPlacementPolicy?: DiskPlacementPolicy; + /** Snapshot schedules */ + snapshotScheduleIds: string[]; } export interface CreateDiskRequest_LabelsEntry { @@ -235,6 +238,36 @@ export interface MoveDiskMetadata { destinationFolderId: string; } +export interface ListDiskSnapshotSchedulesRequest { + $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesRequest"; + /** ID of the Disk resource to list snapshot schedules for. */ + diskId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListDiskOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. 
To get the next page of results, set [page_token] to the + * [ListDiskSnapshotSchedulesResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListDiskSnapshotSchedulesResponse { + $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesResponse"; + /** List of snapshot schedules for the specified disk. */ + snapshotSchedules: SnapshotSchedule[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListDiskSnapshotSchedulesRequest.page_size], use the [next_page_token] as the value + * for the [ListDiskSnapshotSchedulesRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + const baseGetDiskRequest: object = { $type: "yandex.cloud.compute.v1.GetDiskRequest", diskId: "", @@ -485,6 +518,7 @@ const baseCreateDiskRequest: object = { zoneId: "", size: 0, blockSize: 0, + snapshotScheduleIds: "", }; export const CreateDiskRequest = { @@ -537,6 +571,9 @@ export const CreateDiskRequest = { writer.uint32(90).fork() ).ldelim(); } + for (const v of message.snapshotScheduleIds) { + writer.uint32(98).string(v!); + } return writer; }, @@ -545,6 +582,7 @@ export const CreateDiskRequest = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseCreateDiskRequest } as CreateDiskRequest; message.labels = {}; + message.snapshotScheduleIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -590,6 +628,9 @@ export const CreateDiskRequest = { reader.uint32() ); break; + case 12: + message.snapshotScheduleIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -647,6 +688,9 @@ export const CreateDiskRequest = { object.diskPlacementPolicy !== null ? DiskPlacementPolicy.fromJSON(object.diskPlacementPolicy) : undefined; + message.snapshotScheduleIds = (object.snapshotScheduleIds ?? []).map( + (e: any) => String(e) + ); return message; }, @@ -673,6 +717,11 @@ export const CreateDiskRequest = { (obj.diskPlacementPolicy = message.diskPlacementPolicy ? DiskPlacementPolicy.toJSON(message.diskPlacementPolicy) : undefined); + if (message.snapshotScheduleIds) { + obj.snapshotScheduleIds = message.snapshotScheduleIds.map((e) => e); + } else { + obj.snapshotScheduleIds = []; + } return obj; }, @@ -702,6 +751,8 @@ export const CreateDiskRequest = { object.diskPlacementPolicy !== null ? 
DiskPlacementPolicy.fromPartial(object.diskPlacementPolicy) : undefined; + message.snapshotScheduleIds = + object.snapshotScheduleIds?.map((e) => e) || []; return message; }, }; @@ -1675,6 +1726,204 @@ export const MoveDiskMetadata = { messageTypeRegistry.set(MoveDiskMetadata.$type, MoveDiskMetadata); +const baseListDiskSnapshotSchedulesRequest: object = { + $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesRequest", + diskId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListDiskSnapshotSchedulesRequest = { + $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesRequest" as const, + + encode( + message: ListDiskSnapshotSchedulesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDiskSnapshotSchedulesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListDiskSnapshotSchedulesRequest, + } as ListDiskSnapshotSchedulesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDiskSnapshotSchedulesRequest { + const message = { + ...baseListDiskSnapshotSchedulesRequest, + } as ListDiskSnapshotSchedulesRequest; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListDiskSnapshotSchedulesRequest): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListDiskSnapshotSchedulesRequest { + const message = { + ...baseListDiskSnapshotSchedulesRequest, + } as ListDiskSnapshotSchedulesRequest; + message.diskId = object.diskId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListDiskSnapshotSchedulesRequest.$type, + ListDiskSnapshotSchedulesRequest +); + +const baseListDiskSnapshotSchedulesResponse: object = { + $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesResponse", + nextPageToken: "", +}; + +export const ListDiskSnapshotSchedulesResponse = { + $type: "yandex.cloud.compute.v1.ListDiskSnapshotSchedulesResponse" as const, + + encode( + message: ListDiskSnapshotSchedulesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.snapshotSchedules) { + SnapshotSchedule.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDiskSnapshotSchedulesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListDiskSnapshotSchedulesResponse, + } as ListDiskSnapshotSchedulesResponse; + message.snapshotSchedules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotSchedules.push( + SnapshotSchedule.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDiskSnapshotSchedulesResponse { + const message = { + ...baseListDiskSnapshotSchedulesResponse, + } as ListDiskSnapshotSchedulesResponse; + message.snapshotSchedules = (object.snapshotSchedules ?? []).map((e: any) => + SnapshotSchedule.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListDiskSnapshotSchedulesResponse): unknown { + const obj: any = {}; + if (message.snapshotSchedules) { + obj.snapshotSchedules = message.snapshotSchedules.map((e) => + e ? SnapshotSchedule.toJSON(e) : undefined + ); + } else { + obj.snapshotSchedules = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListDiskSnapshotSchedulesResponse { + const message = { + ...baseListDiskSnapshotSchedulesResponse, + } as ListDiskSnapshotSchedulesResponse; + message.snapshotSchedules = + object.snapshotSchedules?.map((e) => SnapshotSchedule.fromPartial(e)) || + []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListDiskSnapshotSchedulesResponse.$type, + ListDiskSnapshotSchedulesResponse +); + /** A set of methods for managing Disk resources. 
*/ export const DiskServiceService = { /** @@ -1779,6 +2028,20 @@ export const DiskServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** List snapshot schedules containing the disk */ + listSnapshotSchedules: { + path: "/yandex.cloud.compute.v1.DiskService/ListSnapshotSchedules", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListDiskSnapshotSchedulesRequest) => + Buffer.from(ListDiskSnapshotSchedulesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListDiskSnapshotSchedulesRequest.decode(value), + responseSerialize: (value: ListDiskSnapshotSchedulesResponse) => + Buffer.from(ListDiskSnapshotSchedulesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListDiskSnapshotSchedulesResponse.decode(value), + }, } as const; export interface DiskServiceServer extends UntypedServiceImplementation { @@ -1815,6 +2078,11 @@ export interface DiskServiceServer extends UntypedServiceImplementation { >; /** Moves the specified disk to another folder of the same cloud. */ move: handleUnaryCall; + /** List snapshot schedules containing the disk */ + listSnapshotSchedules: handleUnaryCall< + ListDiskSnapshotSchedulesRequest, + ListDiskSnapshotSchedulesResponse + >; } export interface DiskServiceClient extends Client { @@ -1955,6 +2223,31 @@ export interface DiskServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** List snapshot schedules containing the disk */ + listSnapshotSchedules( + request: ListDiskSnapshotSchedulesRequest, + callback: ( + error: ServiceError | null, + response: ListDiskSnapshotSchedulesResponse + ) => void + ): ClientUnaryCall; + listSnapshotSchedules( + request: ListDiskSnapshotSchedulesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListDiskSnapshotSchedulesResponse + ) => void + ): ClientUnaryCall; + listSnapshotSchedules( + request: ListDiskSnapshotSchedulesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListDiskSnapshotSchedulesResponse + ) => void + ): ClientUnaryCall; } export const DiskServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/compute/v1/image.ts b/src/generated/yandex/cloud/compute/v1/image.ts index a5ad7257..766c98aa 100644 --- a/src/generated/yandex/cloud/compute/v1/image.ts +++ b/src/generated/yandex/cloud/compute/v1/image.ts @@ -36,10 +36,10 @@ export interface Image { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by the platform. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. - * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + * For example, if you create a disk image using a third-party utility and load it into Object Storage, the license IDs will be lost. * You can specify them in the [yandex.cloud.compute.v1.ImageService.Create] request. 
*/ productIds: string[]; diff --git a/src/generated/yandex/cloud/compute/v1/image_service.ts b/src/generated/yandex/cloud/compute/v1/image_service.ts index 9e4c1ec1..cb9b4e8e 100644 --- a/src/generated/yandex/cloud/compute/v1/image_service.ts +++ b/src/generated/yandex/cloud/compute/v1/image_service.ts @@ -112,10 +112,10 @@ export interface CreateImageRequest { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by the platform. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. - * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + * For example, if you create a disk image using a third-party utility and load it into Object Storage, the license IDs will be lost. * You can specify them in this request. */ productIds: string[]; @@ -127,7 +127,7 @@ export interface CreateImageRequest { snapshotId: string | undefined; /** * URI of the source image to create the new image from. - * Currently only supports links to images that are stored in Yandex Object Storage. + * Currently only supports links to images that are stored in Object Storage. * Currently only supports Qcow2, VMDK, and VHD formats. */ uri: string | undefined; diff --git a/src/generated/yandex/cloud/compute/v1/instance.ts b/src/generated/yandex/cloud/compute/v1/instance.ts index 65988ab7..11632e4d 100644 --- a/src/generated/yandex/cloud/compute/v1/instance.ts +++ b/src/generated/yandex/cloud/compute/v1/instance.ts @@ -46,6 +46,46 @@ export function ipVersionToJSON(object: IpVersion): string { } } +export enum MetadataOption { + METADATA_OPTION_UNSPECIFIED = 0, + /** ENABLED - Option is enabled */ + ENABLED = 1, + /** DISABLED - Option is disabled */ + DISABLED = 2, + UNRECOGNIZED = -1, +} + +export function metadataOptionFromJSON(object: any): MetadataOption { + switch (object) { + case 0: + case "METADATA_OPTION_UNSPECIFIED": + return MetadataOption.METADATA_OPTION_UNSPECIFIED; + case 1: + case "ENABLED": + return MetadataOption.ENABLED; + case 2: + case "DISABLED": + return MetadataOption.DISABLED; + case -1: + case "UNRECOGNIZED": + default: + return MetadataOption.UNRECOGNIZED; + } +} + +export function metadataOptionToJSON(object: MetadataOption): string { + switch (object) { + case MetadataOption.METADATA_OPTION_UNSPECIFIED: + return "METADATA_OPTION_UNSPECIFIED"; + case MetadataOption.ENABLED: + return "ENABLED"; + case MetadataOption.DISABLED: + return "DISABLED"; + default: + return "UNKNOWN"; + } +} + /** An Instance resource. For more information, see [Instances](/docs/compute/concepts/vm). */ export interface Instance { $type: "yandex.cloud.compute.v1.Instance"; @@ -75,6 +115,8 @@ export interface Instance { * For more information, see [Metadata](/docs/compute/concepts/vm-metadata). */ metadata: { [key: string]: string }; + /** Options allow user to configure access to instance's metadata */ + metadataOptions?: MetadataOptions; /** Boot disk that is attached to the instance. */ bootDisk?: AttachedDisk; /** Array of secondary disks that are attached to the instance. 
*/ @@ -481,7 +523,7 @@ export interface PlacementPolicy { hostAffinityRules: PlacementPolicy_HostAffinityRule[]; } -/** Affinitity definition */ +/** Affinity definition */ export interface PlacementPolicy_HostAffinityRule { $type: "yandex.cloud.compute.v1.PlacementPolicy.HostAffinityRule"; /** Affinity label or one of reserved values - 'yc.hostId', 'yc.hostGroupId' */ @@ -534,6 +576,18 @@ export function placementPolicy_HostAffinityRule_OperatorToJSON( } } +export interface MetadataOptions { + $type: "yandex.cloud.compute.v1.MetadataOptions"; + /** Enabled access to GCE flavored metadata */ + gceHttpEndpoint: MetadataOption; + /** Enabled access to AWS flavored metadata (IMDSv1) */ + awsV1HttpEndpoint: MetadataOption; + /** Enabled access to IAM credentials with GCE flavored metadata */ + gceHttpToken: MetadataOption; + /** Enabled access to IAM credentials with AWS flavored metadata (IMDSv1) */ + awsV1HttpToken: MetadataOption; +} + const baseInstance: object = { $type: "yandex.cloud.compute.v1.Instance", id: "", @@ -604,6 +658,12 @@ export const Instance = { writer.uint32(90).fork() ).ldelim(); }); + if (message.metadataOptions !== undefined) { + MetadataOptions.encode( + message.metadataOptions, + writer.uint32(186).fork() + ).ldelim(); + } if (message.bootDisk !== undefined) { AttachedDisk.encode(message.bootDisk, writer.uint32(98).fork()).ldelim(); } @@ -703,6 +763,12 @@ export const Instance = { message.metadata[entry11.key] = entry11.value; } break; + case 23: + message.metadataOptions = MetadataOptions.decode( + reader, + reader.uint32() + ); + break; case 12: message.bootDisk = AttachedDisk.decode(reader, reader.uint32()); break; @@ -806,6 +872,10 @@ export const Instance = { acc[key] = String(value); return acc; }, {}); + message.metadataOptions = + object.metadataOptions !== undefined && object.metadataOptions !== null + ? MetadataOptions.fromJSON(object.metadataOptions) + : undefined; message.bootDisk = object.bootDisk !== undefined && object.bootDisk !== null ? AttachedDisk.fromJSON(object.bootDisk) @@ -874,6 +944,10 @@ export const Instance = { obj.metadata[k] = v; }); } + message.metadataOptions !== undefined && + (obj.metadataOptions = message.metadataOptions + ? MetadataOptions.toJSON(message.metadataOptions) + : undefined); message.bootDisk !== undefined && (obj.bootDisk = message.bootDisk ? AttachedDisk.toJSON(message.bootDisk) @@ -954,6 +1028,10 @@ export const Instance = { } return acc; }, {}); + message.metadataOptions = + object.metadataOptions !== undefined && object.metadataOptions !== null + ? MetadataOptions.fromPartial(object.metadataOptions) + : undefined; message.bootDisk = object.bootDisk !== undefined && object.bootDisk !== null ? 
AttachedDisk.fromPartial(object.bootDisk) @@ -2273,6 +2351,112 @@ messageTypeRegistry.set( PlacementPolicy_HostAffinityRule ); +const baseMetadataOptions: object = { + $type: "yandex.cloud.compute.v1.MetadataOptions", + gceHttpEndpoint: 0, + awsV1HttpEndpoint: 0, + gceHttpToken: 0, + awsV1HttpToken: 0, +}; + +export const MetadataOptions = { + $type: "yandex.cloud.compute.v1.MetadataOptions" as const, + + encode( + message: MetadataOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.gceHttpEndpoint !== 0) { + writer.uint32(8).int32(message.gceHttpEndpoint); + } + if (message.awsV1HttpEndpoint !== 0) { + writer.uint32(16).int32(message.awsV1HttpEndpoint); + } + if (message.gceHttpToken !== 0) { + writer.uint32(24).int32(message.gceHttpToken); + } + if (message.awsV1HttpToken !== 0) { + writer.uint32(32).int32(message.awsV1HttpToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MetadataOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMetadataOptions } as MetadataOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gceHttpEndpoint = reader.int32() as any; + break; + case 2: + message.awsV1HttpEndpoint = reader.int32() as any; + break; + case 3: + message.gceHttpToken = reader.int32() as any; + break; + case 4: + message.awsV1HttpToken = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MetadataOptions { + const message = { ...baseMetadataOptions } as MetadataOptions; + message.gceHttpEndpoint = + object.gceHttpEndpoint !== undefined && object.gceHttpEndpoint !== null + ? metadataOptionFromJSON(object.gceHttpEndpoint) + : 0; + message.awsV1HttpEndpoint = + object.awsV1HttpEndpoint !== undefined && + object.awsV1HttpEndpoint !== null + ? metadataOptionFromJSON(object.awsV1HttpEndpoint) + : 0; + message.gceHttpToken = + object.gceHttpToken !== undefined && object.gceHttpToken !== null + ? metadataOptionFromJSON(object.gceHttpToken) + : 0; + message.awsV1HttpToken = + object.awsV1HttpToken !== undefined && object.awsV1HttpToken !== null + ? metadataOptionFromJSON(object.awsV1HttpToken) + : 0; + return message; + }, + + toJSON(message: MetadataOptions): unknown { + const obj: any = {}; + message.gceHttpEndpoint !== undefined && + (obj.gceHttpEndpoint = metadataOptionToJSON(message.gceHttpEndpoint)); + message.awsV1HttpEndpoint !== undefined && + (obj.awsV1HttpEndpoint = metadataOptionToJSON(message.awsV1HttpEndpoint)); + message.gceHttpToken !== undefined && + (obj.gceHttpToken = metadataOptionToJSON(message.gceHttpToken)); + message.awsV1HttpToken !== undefined && + (obj.awsV1HttpToken = metadataOptionToJSON(message.awsV1HttpToken)); + return obj; + }, + + fromPartial, I>>( + object: I + ): MetadataOptions { + const message = { ...baseMetadataOptions } as MetadataOptions; + message.gceHttpEndpoint = object.gceHttpEndpoint ?? 0; + message.awsV1HttpEndpoint = object.awsV1HttpEndpoint ?? 0; + message.gceHttpToken = object.gceHttpToken ?? 0; + message.awsV1HttpToken = object.awsV1HttpToken ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(MetadataOptions.$type, MetadataOptions); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/compute/v1/instance_service.ts b/src/generated/yandex/cloud/compute/v1/instance_service.ts index 46c7daa0..9267821d 100644 --- a/src/generated/yandex/cloud/compute/v1/instance_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instance_service.ts @@ -15,6 +15,7 @@ import { } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; import { + MetadataOptions, SchedulingPolicy, NetworkSettings, PlacementPolicy, @@ -162,6 +163,8 @@ export interface CreateInstanceRequest { * For more information, see [Metadata](/docs/compute/concepts/vm-metadata). */ metadata: { [key: string]: string }; + /** Options allow user to configure access to instance's metadata */ + metadataOptions?: MetadataOptions; /** Boot disk to attach to the instance. */ bootDiskSpec?: AttachedDiskSpec; /** Array of secondary disks to attach to the instance. */ @@ -268,6 +271,8 @@ export interface UpdateInstanceRequest { * For more information, see [Metadata](/docs/compute/concepts/vm-metadata). */ metadata: { [key: string]: string }; + /** Options allow user to configure access to instance's metadata */ + metadataOptions?: MetadataOptions; /** * ID of the service account to use for [authentication inside the instance](/docs/compute/operations/vm-connect/auth-inside-vm). * To get the service account ID, use a [yandex.cloud.iam.v1.ServiceAccountService.List] request. @@ -825,6 +830,24 @@ export interface MoveInstanceMetadata { destinationFolderId: string; } +export interface GuestStopInstanceMetadata { + $type: "yandex.cloud.compute.v1.GuestStopInstanceMetadata"; + /** ID of the instance that was stopped from guest OS. */ + instanceId: string; +} + +export interface PreemptInstanceMetadata { + $type: "yandex.cloud.compute.v1.PreemptInstanceMetadata"; + /** ID of the instance that is being preempted. */ + instanceId: string; +} + +export interface CrashInstanceMetadata { + $type: "yandex.cloud.compute.v1.CrashInstanceMetadata"; + /** ID of the instance that was crashed. */ + instanceId: string; +} + const baseGetInstanceRequest: object = { $type: "yandex.cloud.compute.v1.GetInstanceRequest", instanceId: "", @@ -1149,6 +1172,12 @@ export const CreateInstanceRequest = { writer.uint32(66).fork() ).ldelim(); }); + if (message.metadataOptions !== undefined) { + MetadataOptions.encode( + message.metadataOptions, + writer.uint32(154).fork() + ).ldelim(); + } if (message.bootDiskSpec !== undefined) { AttachedDiskSpec.encode( message.bootDiskSpec, @@ -1246,6 +1275,12 @@ export const CreateInstanceRequest = { message.metadata[entry8.key] = entry8.value; } break; + case 19: + message.metadataOptions = MetadataOptions.decode( + reader, + reader.uint32() + ); + break; case 9: message.bootDiskSpec = AttachedDiskSpec.decode( reader, @@ -1342,6 +1377,10 @@ export const CreateInstanceRequest = { acc[key] = String(value); return acc; }, {}); + message.metadataOptions = + object.metadataOptions !== undefined && object.metadataOptions !== null + ? MetadataOptions.fromJSON(object.metadataOptions) + : undefined; message.bootDiskSpec = object.bootDiskSpec !== undefined && object.bootDiskSpec !== null ? 
AttachedDiskSpec.fromJSON(object.bootDiskSpec) @@ -1405,6 +1444,10 @@ export const CreateInstanceRequest = { obj.metadata[k] = v; }); } + message.metadataOptions !== undefined && + (obj.metadataOptions = message.metadataOptions + ? MetadataOptions.toJSON(message.metadataOptions) + : undefined); message.bootDiskSpec !== undefined && (obj.bootDiskSpec = message.bootDiskSpec ? AttachedDiskSpec.toJSON(message.bootDiskSpec) @@ -1484,6 +1527,10 @@ export const CreateInstanceRequest = { } return acc; }, {}); + message.metadataOptions = + object.metadataOptions !== undefined && object.metadataOptions !== null + ? MetadataOptions.fromPartial(object.metadataOptions) + : undefined; message.bootDiskSpec = object.bootDiskSpec !== undefined && object.bootDiskSpec !== null ? AttachedDiskSpec.fromPartial(object.bootDiskSpec) @@ -1814,6 +1861,12 @@ export const UpdateInstanceRequest = { writer.uint32(66).fork() ).ldelim(); }); + if (message.metadataOptions !== undefined) { + MetadataOptions.encode( + message.metadataOptions, + writer.uint32(106).fork() + ).ldelim(); + } if (message.serviceAccountId !== "") { writer.uint32(74).string(message.serviceAccountId); } @@ -1886,6 +1939,12 @@ export const UpdateInstanceRequest = { message.metadata[entry8.key] = entry8.value; } break; + case 13: + message.metadataOptions = MetadataOptions.decode( + reader, + reader.uint32() + ); + break; case 9: message.serviceAccountId = reader.string(); break; @@ -1953,6 +2012,10 @@ export const UpdateInstanceRequest = { acc[key] = String(value); return acc; }, {}); + message.metadataOptions = + object.metadataOptions !== undefined && object.metadataOptions !== null + ? MetadataOptions.fromJSON(object.metadataOptions) + : undefined; message.serviceAccountId = object.serviceAccountId !== undefined && object.serviceAccountId !== null ? String(object.serviceAccountId) @@ -1999,6 +2062,10 @@ export const UpdateInstanceRequest = { obj.metadata[k] = v; }); } + message.metadataOptions !== undefined && + (obj.metadataOptions = message.metadataOptions + ? MetadataOptions.toJSON(message.metadataOptions) + : undefined); message.serviceAccountId !== undefined && (obj.serviceAccountId = message.serviceAccountId); message.networkSettings !== undefined && @@ -2048,6 +2115,10 @@ export const UpdateInstanceRequest = { } return acc; }, {}); + message.metadataOptions = + object.metadataOptions !== undefined && object.metadataOptions !== null + ? MetadataOptions.fromPartial(object.metadataOptions) + : undefined; message.serviceAccountId = object.serviceAccountId ?? ""; message.networkSettings = object.networkSettings !== undefined && object.networkSettings !== null @@ -6045,6 +6116,216 @@ export const MoveInstanceMetadata = { messageTypeRegistry.set(MoveInstanceMetadata.$type, MoveInstanceMetadata); +const baseGuestStopInstanceMetadata: object = { + $type: "yandex.cloud.compute.v1.GuestStopInstanceMetadata", + instanceId: "", +}; + +export const GuestStopInstanceMetadata = { + $type: "yandex.cloud.compute.v1.GuestStopInstanceMetadata" as const, + + encode( + message: GuestStopInstanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GuestStopInstanceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGuestStopInstanceMetadata, + } as GuestStopInstanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GuestStopInstanceMetadata { + const message = { + ...baseGuestStopInstanceMetadata, + } as GuestStopInstanceMetadata; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + return message; + }, + + toJSON(message: GuestStopInstanceMetadata): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GuestStopInstanceMetadata { + const message = { + ...baseGuestStopInstanceMetadata, + } as GuestStopInstanceMetadata; + message.instanceId = object.instanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GuestStopInstanceMetadata.$type, + GuestStopInstanceMetadata +); + +const basePreemptInstanceMetadata: object = { + $type: "yandex.cloud.compute.v1.PreemptInstanceMetadata", + instanceId: "", +}; + +export const PreemptInstanceMetadata = { + $type: "yandex.cloud.compute.v1.PreemptInstanceMetadata" as const, + + encode( + message: PreemptInstanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PreemptInstanceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePreemptInstanceMetadata, + } as PreemptInstanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PreemptInstanceMetadata { + const message = { + ...basePreemptInstanceMetadata, + } as PreemptInstanceMetadata; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + return message; + }, + + toJSON(message: PreemptInstanceMetadata): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): PreemptInstanceMetadata { + const message = { + ...basePreemptInstanceMetadata, + } as PreemptInstanceMetadata; + message.instanceId = object.instanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(PreemptInstanceMetadata.$type, PreemptInstanceMetadata); + +const baseCrashInstanceMetadata: object = { + $type: "yandex.cloud.compute.v1.CrashInstanceMetadata", + instanceId: "", +}; + +export const CrashInstanceMetadata = { + $type: "yandex.cloud.compute.v1.CrashInstanceMetadata" as const, + + encode( + message: CrashInstanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CrashInstanceMetadata { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCrashInstanceMetadata } as CrashInstanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CrashInstanceMetadata { + const message = { ...baseCrashInstanceMetadata } as CrashInstanceMetadata; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + return message; + }, + + toJSON(message: CrashInstanceMetadata): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CrashInstanceMetadata { + const message = { ...baseCrashInstanceMetadata } as CrashInstanceMetadata; + message.instanceId = object.instanceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CrashInstanceMetadata.$type, CrashInstanceMetadata); + /** A set of methods for managing Instance resources. */ export const InstanceServiceService = { /** @@ -6308,7 +6589,7 @@ export const InstanceServiceService = { * * The instance must be stopped before moving. To stop the instance, make a [Stop] request. * - * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * After moving, the instance will start recording its Monitoring default metrics to its new folder. Metrics * that have been recorded to the source folder prior to moving will be retained. */ move: { @@ -6406,7 +6687,7 @@ export interface InstanceServiceServer extends UntypedServiceImplementation { * * The instance must be stopped before moving. To stop the instance, make a [Stop] request. * - * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * After moving, the instance will start recording its Monitoring default metrics to its new folder. Metrics * that have been recorded to the source folder prior to moving will be retained. */ move: handleUnaryCall; @@ -6761,7 +7042,7 @@ export interface InstanceServiceClient extends Client { * * The instance must be stopped before moving. To stop the instance, make a [Stop] request. * - * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * After moving, the instance will start recording its Monitoring default metrics to its new folder. Metrics * that have been recorded to the source folder prior to moving will be retained. */ move( diff --git a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts index f276493f..c0dee838 100644 --- a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts +++ b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group.ts @@ -318,14 +318,14 @@ export interface ScalePolicy_AutoScale { /** * Defines an autoscaling rule based on the average CPU utilization of the instance group. * - * If more than one rule is specified, e.g. CPU utilization and one or more Yandex Monitoring metrics ([custom_rules]), + * If more than one rule is specified, e.g. 
CPU utilization and one or more Monitoring metrics ([custom_rules]), * the size of the instance group will be equal to the maximum of sizes calculated according to each metric. */ cpuUtilizationRule?: ScalePolicy_CpuUtilizationRule; /** - * Defines an autoscaling rule based on a [custom metric](/docs/monitoring/operations/metric/add) from Yandex Monitoring. + * Defines an autoscaling rule based on a [custom metric](/docs/monitoring/operations/metric/add) from Monitoring. * - * If more than one rule is specified, e.g. CPU utilization ([cpu_utilization_rule]) and one or more Yandex Monitoring + * If more than one rule is specified, e.g. CPU utilization ([cpu_utilization_rule]) and one or more Monitoring * metrics, the size of the instance group will be equal to the maximum of sizes calculated according to each metric. */ customRules: ScalePolicy_CustomRule[]; @@ -392,15 +392,15 @@ export interface ScalePolicy_CustomRule { ruleType: ScalePolicy_CustomRule_RuleType; /** Type of custom metric. This field affects how Instance Groups calculates the average metric value. */ metricType: ScalePolicy_CustomRule_MetricType; - /** Name of custom metric in Yandex Monitoring that should be used for scaling. */ + /** Name of custom metric in Monitoring that should be used for scaling. */ metricName: string; - /** Labels of custom metric in Yandex Monitoring that should be used for scaling. */ + /** Labels of custom metric in Monitoring that should be used for scaling. */ labels: { [key: string]: string }; /** Target value for the custom metric. Instance Groups maintains this level for each availability zone. */ target: number; - /** Folder id of custom metric in Yandex Monitoring that should be used for scaling. */ + /** Folder id of custom metric in Monitoring that should be used for scaling. */ folderId: string; - /** Service of custom metric in Yandex Monitoring that should be used for scaling. */ + /** Service of custom metric in Monitoring that should be used for scaling. */ service: string; } @@ -718,7 +718,7 @@ export interface PlacementPolicy { hostAffinityRules: PlacementPolicy_HostAffinityRule[]; } -/** Affinitity definition */ +/** Affinity definition */ export interface PlacementPolicy_HostAffinityRule { $type: "yandex.cloud.compute.v1.instancegroup.PlacementPolicy.HostAffinityRule"; /** Affinity label or one of reserved values - 'yc.hostId', 'yc.hostGroupId' */ diff --git a/src/generated/yandex/cloud/compute/v1/snapshot.ts b/src/generated/yandex/cloud/compute/v1/snapshot.ts index 4c1fe67c..b70c2c2f 100644 --- a/src/generated/yandex/cloud/compute/v1/snapshot.ts +++ b/src/generated/yandex/cloud/compute/v1/snapshot.ts @@ -28,10 +28,10 @@ export interface Snapshot { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by the platform. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. - * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. + * For example, if you create a disk image using a third-party utility and load it into Object Storage, the license IDs will be lost. * You can specify them in the [yandex.cloud.compute.v1.ImageService.Create] request. 
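
The instance_service.ts hunks earlier in this patch add an optional metadataOptions message to both CreateInstanceRequest (field 19) and UpdateInstanceRequest (field 13), alongside the new GuestStop/Preempt/Crash operation metadata types. As a rough illustration of what the new field looks like from calling code, here is a minimal sketch built only from the generated fromPartial() helpers; the deep import path and the folder ID are assumptions, and the Session/channel/client wiring needed to actually send the request is outside this excerpt.

// Sketch only: a CreateInstanceRequest carrying the new metadataOptions field.
// The import path assumes the src/generated layout shown in this patch.
import { CreateInstanceRequest } from "./src/generated/yandex/cloud/compute/v1/instance_service";

const createRequest = CreateInstanceRequest.fromPartial({
  folderId: "<folder-id>",                       // placeholder
  metadata: { "user-data": "#cloud-config\n" },  // existing field, unchanged by this patch
  // New in this patch (field 19): per-instance metadata service options.
  // Left at defaults here because MetadataOptions' own fields are declared
  // outside the excerpt above.
  metadataOptions: {},
});
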
*/ productIds: string[]; diff --git a/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts b/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts new file mode 100644 index 00000000..0fdc0d79 --- /dev/null +++ b/src/generated/yandex/cloud/compute/v1/snapshot_schedule.ts @@ -0,0 +1,798 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { Duration } from "../../../../google/protobuf/duration"; + +export const protobufPackage = "yandex.cloud.compute.v1"; + +export interface SnapshotSchedule { + $type: "yandex.cloud.compute.v1.SnapshotSchedule"; + /** ID of the snapshot schedule policy. */ + id: string; + /** ID of the folder that the scheduler policy belongs to. */ + folderId: string; + createdAt?: Date; + /** + * Name of the schedule policy. + * The name is unique within the folder. + */ + name: string; + /** Description of the schedule policy. */ + description: string; + /** Resource labels as `key:value` pairs. */ + labels: { [key: string]: string }; + status: SnapshotSchedule_Status; + /** schedule properties */ + schedulePolicy?: SchedulePolicy; + retentionPeriod?: Duration | undefined; + snapshotCount: number | undefined; + /** properties to create snapshot with. */ + snapshotSpec?: SnapshotSpec; +} + +export enum SnapshotSchedule_Status { + STATUS_UNSPECIFIED = 0, + CREATING = 1, + ACTIVE = 2, + INACTIVE = 3, + DELETING = 4, + UPDATING = 5, + UNRECOGNIZED = -1, +} + +export function snapshotSchedule_StatusFromJSON( + object: any +): SnapshotSchedule_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return SnapshotSchedule_Status.STATUS_UNSPECIFIED; + case 1: + case "CREATING": + return SnapshotSchedule_Status.CREATING; + case 2: + case "ACTIVE": + return SnapshotSchedule_Status.ACTIVE; + case 3: + case "INACTIVE": + return SnapshotSchedule_Status.INACTIVE; + case 4: + case "DELETING": + return SnapshotSchedule_Status.DELETING; + case 5: + case "UPDATING": + return SnapshotSchedule_Status.UPDATING; + case -1: + case "UNRECOGNIZED": + default: + return SnapshotSchedule_Status.UNRECOGNIZED; + } +} + +export function snapshotSchedule_StatusToJSON( + object: SnapshotSchedule_Status +): string { + switch (object) { + case SnapshotSchedule_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case SnapshotSchedule_Status.CREATING: + return "CREATING"; + case SnapshotSchedule_Status.ACTIVE: + return "ACTIVE"; + case SnapshotSchedule_Status.INACTIVE: + return "INACTIVE"; + case SnapshotSchedule_Status.DELETING: + return "DELETING"; + case SnapshotSchedule_Status.UPDATING: + return "UPDATING"; + default: + return "UNKNOWN"; + } +} + +export interface SnapshotSchedule_LabelsEntry { + $type: "yandex.cloud.compute.v1.SnapshotSchedule.LabelsEntry"; + key: string; + value: string; +} + +export interface SchedulePolicy { + $type: "yandex.cloud.compute.v1.SchedulePolicy"; + /** start time for the first run. */ + startAt?: Date; + /** cron format (* * * * *) */ + expression: string; +} + +/** Properties of created snapshot backup */ +export interface SnapshotSpec { + $type: "yandex.cloud.compute.v1.SnapshotSpec"; + /** Description of the created snapshot. */ + description: string; + /** Resource labels as `key:value` pairs. 
*/ + labels: { [key: string]: string }; +} + +export interface SnapshotSpec_LabelsEntry { + $type: "yandex.cloud.compute.v1.SnapshotSpec.LabelsEntry"; + key: string; + value: string; +} + +const baseSnapshotSchedule: object = { + $type: "yandex.cloud.compute.v1.SnapshotSchedule", + id: "", + folderId: "", + name: "", + description: "", + status: 0, +}; + +export const SnapshotSchedule = { + $type: "yandex.cloud.compute.v1.SnapshotSchedule" as const, + + encode( + message: SnapshotSchedule, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + SnapshotSchedule_LabelsEntry.encode( + { + $type: "yandex.cloud.compute.v1.SnapshotSchedule.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.status !== 0) { + writer.uint32(56).int32(message.status); + } + if (message.schedulePolicy !== undefined) { + SchedulePolicy.encode( + message.schedulePolicy, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.retentionPeriod !== undefined) { + Duration.encode( + message.retentionPeriod, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.snapshotCount !== undefined) { + writer.uint32(80).int64(message.snapshotCount); + } + if (message.snapshotSpec !== undefined) { + SnapshotSpec.encode( + message.snapshotSpec, + writer.uint32(90).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SnapshotSchedule { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSnapshotSchedule } as SnapshotSchedule; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = SnapshotSchedule_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.status = reader.int32() as any; + break; + case 8: + message.schedulePolicy = SchedulePolicy.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.retentionPeriod = Duration.decode(reader, reader.uint32()); + break; + case 10: + message.snapshotCount = longToNumber(reader.int64() as Long); + break; + case 11: + message.snapshotSpec = SnapshotSpec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SnapshotSchedule { + const message = { ...baseSnapshotSchedule } as SnapshotSchedule; + message.id = + object.id !== undefined && object.id !== null ? 
String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.status = + object.status !== undefined && object.status !== null + ? snapshotSchedule_StatusFromJSON(object.status) + : 0; + message.schedulePolicy = + object.schedulePolicy !== undefined && object.schedulePolicy !== null + ? SchedulePolicy.fromJSON(object.schedulePolicy) + : undefined; + message.retentionPeriod = + object.retentionPeriod !== undefined && object.retentionPeriod !== null + ? Duration.fromJSON(object.retentionPeriod) + : undefined; + message.snapshotCount = + object.snapshotCount !== undefined && object.snapshotCount !== null + ? Number(object.snapshotCount) + : undefined; + message.snapshotSpec = + object.snapshotSpec !== undefined && object.snapshotSpec !== null + ? SnapshotSpec.fromJSON(object.snapshotSpec) + : undefined; + return message; + }, + + toJSON(message: SnapshotSchedule): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.status !== undefined && + (obj.status = snapshotSchedule_StatusToJSON(message.status)); + message.schedulePolicy !== undefined && + (obj.schedulePolicy = message.schedulePolicy + ? SchedulePolicy.toJSON(message.schedulePolicy) + : undefined); + message.retentionPeriod !== undefined && + (obj.retentionPeriod = message.retentionPeriod + ? Duration.toJSON(message.retentionPeriod) + : undefined); + message.snapshotCount !== undefined && + (obj.snapshotCount = Math.round(message.snapshotCount)); + message.snapshotSpec !== undefined && + (obj.snapshotSpec = message.snapshotSpec + ? SnapshotSpec.toJSON(message.snapshotSpec) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SnapshotSchedule { + const message = { ...baseSnapshotSchedule } as SnapshotSchedule; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.status = object.status ?? 0; + message.schedulePolicy = + object.schedulePolicy !== undefined && object.schedulePolicy !== null + ? SchedulePolicy.fromPartial(object.schedulePolicy) + : undefined; + message.retentionPeriod = + object.retentionPeriod !== undefined && object.retentionPeriod !== null + ? 
Duration.fromPartial(object.retentionPeriod) + : undefined; + message.snapshotCount = object.snapshotCount ?? undefined; + message.snapshotSpec = + object.snapshotSpec !== undefined && object.snapshotSpec !== null + ? SnapshotSpec.fromPartial(object.snapshotSpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(SnapshotSchedule.$type, SnapshotSchedule); + +const baseSnapshotSchedule_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.SnapshotSchedule.LabelsEntry", + key: "", + value: "", +}; + +export const SnapshotSchedule_LabelsEntry = { + $type: "yandex.cloud.compute.v1.SnapshotSchedule.LabelsEntry" as const, + + encode( + message: SnapshotSchedule_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SnapshotSchedule_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSnapshotSchedule_LabelsEntry, + } as SnapshotSchedule_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SnapshotSchedule_LabelsEntry { + const message = { + ...baseSnapshotSchedule_LabelsEntry, + } as SnapshotSchedule_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: SnapshotSchedule_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): SnapshotSchedule_LabelsEntry { + const message = { + ...baseSnapshotSchedule_LabelsEntry, + } as SnapshotSchedule_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + SnapshotSchedule_LabelsEntry.$type, + SnapshotSchedule_LabelsEntry +); + +const baseSchedulePolicy: object = { + $type: "yandex.cloud.compute.v1.SchedulePolicy", + expression: "", +}; + +export const SchedulePolicy = { + $type: "yandex.cloud.compute.v1.SchedulePolicy" as const, + + encode( + message: SchedulePolicy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.startAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startAt), + writer.uint32(10).fork() + ).ldelim(); + } + if (message.expression !== "") { + writer.uint32(18).string(message.expression); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SchedulePolicy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSchedulePolicy } as SchedulePolicy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.startAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + message.expression = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SchedulePolicy { + const message = { ...baseSchedulePolicy } as SchedulePolicy; + message.startAt = + object.startAt !== undefined && object.startAt !== null + ? fromJsonTimestamp(object.startAt) + : undefined; + message.expression = + object.expression !== undefined && object.expression !== null + ? String(object.expression) + : ""; + return message; + }, + + toJSON(message: SchedulePolicy): unknown { + const obj: any = {}; + message.startAt !== undefined && + (obj.startAt = message.startAt.toISOString()); + message.expression !== undefined && (obj.expression = message.expression); + return obj; + }, + + fromPartial, I>>( + object: I + ): SchedulePolicy { + const message = { ...baseSchedulePolicy } as SchedulePolicy; + message.startAt = object.startAt ?? undefined; + message.expression = object.expression ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(SchedulePolicy.$type, SchedulePolicy); + +const baseSnapshotSpec: object = { + $type: "yandex.cloud.compute.v1.SnapshotSpec", + description: "", +}; + +export const SnapshotSpec = { + $type: "yandex.cloud.compute.v1.SnapshotSpec" as const, + + encode( + message: SnapshotSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.description !== "") { + writer.uint32(10).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + SnapshotSpec_LabelsEntry.encode( + { + $type: "yandex.cloud.compute.v1.SnapshotSpec.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(18).fork() + ).ldelim(); + }); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SnapshotSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSnapshotSpec } as SnapshotSpec; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.description = reader.string(); + break; + case 2: + const entry2 = SnapshotSpec_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry2.value !== undefined) { + message.labels[entry2.key] = entry2.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SnapshotSpec { + const message = { ...baseSnapshotSpec } as SnapshotSpec; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: SnapshotSpec): unknown { + const obj: any = {}; + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): SnapshotSpec { + const message = { ...baseSnapshotSpec } as SnapshotSpec; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(SnapshotSpec.$type, SnapshotSpec); + +const baseSnapshotSpec_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.SnapshotSpec.LabelsEntry", + key: "", + value: "", +}; + +export const SnapshotSpec_LabelsEntry = { + $type: "yandex.cloud.compute.v1.SnapshotSpec.LabelsEntry" as const, + + encode( + message: SnapshotSpec_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SnapshotSpec_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSnapshotSpec_LabelsEntry, + } as SnapshotSpec_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SnapshotSpec_LabelsEntry { + const message = { + ...baseSnapshotSpec_LabelsEntry, + } as SnapshotSpec_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: SnapshotSpec_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): SnapshotSpec_LabelsEntry { + const message = { + ...baseSnapshotSpec_LabelsEntry, + } as SnapshotSpec_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + SnapshotSpec_LabelsEntry.$type, + SnapshotSpec_LabelsEntry +); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? 
Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts b/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts new file mode 100644 index 00000000..f62f353d --- /dev/null +++ b/src/generated/yandex/cloud/compute/v1/snapshot_schedule_service.ts @@ -0,0 +1,3100 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + SchedulePolicy, + SnapshotSpec, + SnapshotSchedule, +} from "../../../../yandex/cloud/compute/v1/snapshot_schedule"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Duration } from "../../../../google/protobuf/duration"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { Snapshot } from "../../../../yandex/cloud/compute/v1/snapshot"; +import { Disk } from "../../../../yandex/cloud/compute/v1/disk"; + +export const protobufPackage = "yandex.cloud.compute.v1"; + +export interface GetSnapshotScheduleRequest { + $type: "yandex.cloud.compute.v1.GetSnapshotScheduleRequest"; + /** ID of the SnapshotSchedule resource to return. */ + snapshotScheduleId: string; +} + +export interface ListSnapshotSchedulesRequest { + $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesRequest"; + /** ID of the folder to list snapshot schedules in. */ + folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], + * the service returns a [ListSnapshotSchedulesResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListSnapshotSchedulesResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + filter: string; + /** + * By which column the listing should be ordered and in which direction, + * format is "createdAt desc". "id asc" if omitted. 
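
To make the paging and ordering semantics documented here concrete, a small request sketch follows; the values are placeholders, the import path assumes the src/generated layout from this patch, and the gRPC client needed to issue the call is outside this excerpt.

// Sketch only: first page of snapshot schedules in a folder, newest first.
import { ListSnapshotSchedulesRequest } from "./src/generated/yandex/cloud/compute/v1/snapshot_schedule_service";

const listRequest = ListSnapshotSchedulesRequest.fromPartial({
  folderId: "<folder-id>",    // placeholder
  pageSize: 100,              // server returns nextPageToken when more results exist
  orderBy: "createdAt desc",  // documented format; "id asc" when omitted
});
// For the next page, pass the previous response's nextPageToken as pageToken.
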
+ */ + orderBy: string; +} + +export interface ListSnapshotSchedulesResponse { + $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesResponse"; + /** List of SnapshotSchedule resources. */ + snapshotSchedules: SnapshotSchedule[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListSnapshotSchedulesRequest.page_size], use + * the [next_page_token] as the value + * for the [ListSnapshotSchedulesRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface CreateSnapshotScheduleRequest { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest"; + /** ID of the folder to create a snapshot schedule in. */ + folderId: string; + name: string; + description: string; + labels: { [key: string]: string }; + /** schedule properties */ + schedulePolicy?: SchedulePolicy; + retentionPeriod?: Duration | undefined; + snapshotCount: number | undefined; + snapshotSpec?: SnapshotSpec; + diskIds: string[]; +} + +export interface CreateSnapshotScheduleRequest_LabelsEntry { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateSnapshotScheduleMetadata { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleMetadata"; + snapshotScheduleId: string; +} + +export interface UpdateSnapshotScheduleRequest { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest"; + /** ID of the SnapshotSchedule resource to update. */ + snapshotScheduleId: string; + /** Field mask that specifies which fields of the SnapshotSchedule resource are going to be updated. */ + updateMask?: FieldMask; + /** schedule properties */ + name: string; + description: string; + labels: { [key: string]: string }; + schedulePolicy?: SchedulePolicy; + retentionPeriod?: Duration | undefined; + snapshotCount: number | undefined; + snapshotSpec?: SnapshotSpec; +} + +export interface UpdateSnapshotScheduleRequest_LabelsEntry { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateSnapshotScheduleMetadata { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleMetadata"; + snapshotScheduleId: string; +} + +export interface DeleteSnapshotScheduleRequest { + $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleRequest"; + /** ID of the snapshot schedule to delete. */ + snapshotScheduleId: string; +} + +export interface DeleteSnapshotScheduleMetadata { + $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleMetadata"; + snapshotScheduleId: string; +} + +export interface DisableSnapshotScheduleRequest { + $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleRequest"; + /** ID of the snapshot schedule to disable. */ + snapshotScheduleId: string; +} + +export interface DisableSnapshotScheduleMetadata { + $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleMetadata"; + snapshotScheduleId: string; +} + +export interface EnableSnapshotScheduleRequest { + $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleRequest"; + /** ID of the snapshot schedule to enable. 
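
The CreateSnapshotScheduleRequest defined above combines a cron-style SchedulePolicy, a retention setting (retentionPeriod and snapshotCount appear to be mutually exclusive, mirroring a proto oneof), an optional SnapshotSpec, and the disk IDs to snapshot. A hedged sketch of assembling one with the generated helpers; all values are placeholders and the import paths assume the src/generated layout from this patch.

// Sketch only: a daily snapshot schedule with time-based retention.
import { Duration } from "./src/generated/google/protobuf/duration";
import { SchedulePolicy, SnapshotSpec } from "./src/generated/yandex/cloud/compute/v1/snapshot_schedule";
import { CreateSnapshotScheduleRequest } from "./src/generated/yandex/cloud/compute/v1/snapshot_schedule_service";

const createScheduleRequest = CreateSnapshotScheduleRequest.fromPartial({
  folderId: "<folder-id>",                  // placeholder
  name: "daily-disk-snapshots",
  schedulePolicy: SchedulePolicy.fromPartial({
    expression: "0 3 * * *",                // cron format, as documented above
  }),
  // Keep snapshots for 7 days; setting snapshotCount instead would switch to
  // count-based retention, so only one of the two is set here.
  retentionPeriod: Duration.fromPartial({ seconds: 7 * 24 * 3600 }),
  snapshotSpec: SnapshotSpec.fromPartial({ description: "created by schedule" }),
  diskIds: ["<disk-id>"],                   // placeholder
});
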
*/ + snapshotScheduleId: string; +} + +export interface EnableSnapshotScheduleMetadata { + $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleMetadata"; + snapshotScheduleId: string; +} + +export interface ListSnapshotScheduleOperationsRequest { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsRequest"; + /** ID of the SnapshotSchedule resource to list operations for. */ + snapshotScheduleId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListSnapshotScheduleOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListSnapshotScheduleOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListSnapshotScheduleOperationsResponse { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsResponse"; + /** List of operations for the specified snapshot schedule. */ + operations: Operation[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListSnapshotScheduleOperationsRequest.page_size], use the [next_page_token] as the value + * for the [ListSnapshotScheduleOperationsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface ListSnapshotScheduleSnapshotsRequest { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsRequest"; + /** ID of the SnapshotSchedule resource to list snapshots for. */ + snapshotScheduleId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListSnapshotScheduleSnapshotsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListSnapshotScheduleSnapshotsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListSnapshotScheduleSnapshotsResponse { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsResponse"; + /** List of snapshots for the specified snapshot schedule. */ + snapshots: Snapshot[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListSnapshotScheduleSnapshotsRequest.page_size], use the [next_page_token] as the value + * for the [ListSnapshotScheduleSnapshotsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface ListSnapshotScheduleDisksRequest { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksRequest"; + /** ID of the SnapshotSchedule resource to list disks for. */ + snapshotScheduleId: string; + /** + * The maximum number of results per page to return. 
If the number of available + * results is larger than [page_size], the service returns a [ListSnapshotScheduleDisksResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListSnapshotScheduleDisksResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListSnapshotScheduleDisksResponse { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksResponse"; + /** List of disks for the specified snapshot schedule. */ + disks: Disk[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListSnapshotScheduleDisksRequest.page_size], use the [next_page_token] as the value + * for the [ListSnapshotScheduleDisksRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface UpdateSnapshotScheduleDisksRequest { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksRequest"; + /** ID of the snapshot schedule to update. */ + snapshotScheduleId: string; + /** List of disk ids to remove from the specified schedule. */ + remove: string[]; + /** List of disk ids to add to the specified schedule */ + add: string[]; +} + +export interface UpdateSnapshotScheduleDisksMetadata { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksMetadata"; + snapshotScheduleId: string; +} + +const baseGetSnapshotScheduleRequest: object = { + $type: "yandex.cloud.compute.v1.GetSnapshotScheduleRequest", + snapshotScheduleId: "", +}; + +export const GetSnapshotScheduleRequest = { + $type: "yandex.cloud.compute.v1.GetSnapshotScheduleRequest" as const, + + encode( + message: GetSnapshotScheduleRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetSnapshotScheduleRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseGetSnapshotScheduleRequest, + } as GetSnapshotScheduleRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetSnapshotScheduleRequest { + const message = { + ...baseGetSnapshotScheduleRequest, + } as GetSnapshotScheduleRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: GetSnapshotScheduleRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetSnapshotScheduleRequest { + const message = { + ...baseGetSnapshotScheduleRequest, + } as GetSnapshotScheduleRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + GetSnapshotScheduleRequest.$type, + GetSnapshotScheduleRequest +); + +const baseListSnapshotSchedulesRequest: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesRequest", + folderId: "", + pageSize: 0, + pageToken: "", + filter: "", + orderBy: "", +}; + +export const ListSnapshotSchedulesRequest = { + $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesRequest" as const, + + encode( + message: ListSnapshotSchedulesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + if (message.orderBy !== "") { + writer.uint32(42).string(message.orderBy); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotSchedulesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotSchedulesRequest, + } as ListSnapshotSchedulesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + case 5: + message.orderBy = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotSchedulesRequest { + const message = { + ...baseListSnapshotSchedulesRequest, + } as ListSnapshotSchedulesRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.orderBy = + object.orderBy !== undefined && object.orderBy !== null + ? String(object.orderBy) + : ""; + return message; + }, + + toJSON(message: ListSnapshotSchedulesRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + message.orderBy !== undefined && (obj.orderBy = message.orderBy); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListSnapshotSchedulesRequest { + const message = { + ...baseListSnapshotSchedulesRequest, + } as ListSnapshotSchedulesRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + message.orderBy = object.orderBy ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotSchedulesRequest.$type, + ListSnapshotSchedulesRequest +); + +const baseListSnapshotSchedulesResponse: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesResponse", + nextPageToken: "", +}; + +export const ListSnapshotSchedulesResponse = { + $type: "yandex.cloud.compute.v1.ListSnapshotSchedulesResponse" as const, + + encode( + message: ListSnapshotSchedulesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.snapshotSchedules) { + SnapshotSchedule.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotSchedulesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotSchedulesResponse, + } as ListSnapshotSchedulesResponse; + message.snapshotSchedules = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotSchedules.push( + SnapshotSchedule.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotSchedulesResponse { + const message = { + ...baseListSnapshotSchedulesResponse, + } as ListSnapshotSchedulesResponse; + message.snapshotSchedules = (object.snapshotSchedules ?? []).map((e: any) => + SnapshotSchedule.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotSchedulesResponse): unknown { + const obj: any = {}; + if (message.snapshotSchedules) { + obj.snapshotSchedules = message.snapshotSchedules.map((e) => + e ? SnapshotSchedule.toJSON(e) : undefined + ); + } else { + obj.snapshotSchedules = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListSnapshotSchedulesResponse { + const message = { + ...baseListSnapshotSchedulesResponse, + } as ListSnapshotSchedulesResponse; + message.snapshotSchedules = + object.snapshotSchedules?.map((e) => SnapshotSchedule.fromPartial(e)) || + []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotSchedulesResponse.$type, + ListSnapshotSchedulesResponse +); + +const baseCreateSnapshotScheduleRequest: object = { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest", + folderId: "", + name: "", + description: "", + diskIds: "", +}; + +export const CreateSnapshotScheduleRequest = { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest" as const, + + encode( + message: CreateSnapshotScheduleRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateSnapshotScheduleRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.schedulePolicy !== undefined) { + SchedulePolicy.encode( + message.schedulePolicy, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.retentionPeriod !== undefined) { + Duration.encode( + message.retentionPeriod, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.snapshotCount !== undefined) { + writer.uint32(56).int64(message.snapshotCount); + } + if (message.snapshotSpec !== undefined) { + SnapshotSpec.encode( + message.snapshotSpec, + writer.uint32(66).fork() + ).ldelim(); + } + for (const v of message.diskIds) { + writer.uint32(74).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateSnapshotScheduleRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateSnapshotScheduleRequest, + } as CreateSnapshotScheduleRequest; + message.labels = {}; + message.diskIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateSnapshotScheduleRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.schedulePolicy = SchedulePolicy.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.retentionPeriod = Duration.decode(reader, reader.uint32()); + break; + case 7: + message.snapshotCount = longToNumber(reader.int64() as Long); + break; + case 8: + message.snapshotSpec = SnapshotSpec.decode(reader, reader.uint32()); + break; + case 9: + message.diskIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateSnapshotScheduleRequest { + const message = { + ...baseCreateSnapshotScheduleRequest, + } as CreateSnapshotScheduleRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? 
String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.schedulePolicy = + object.schedulePolicy !== undefined && object.schedulePolicy !== null + ? SchedulePolicy.fromJSON(object.schedulePolicy) + : undefined; + message.retentionPeriod = + object.retentionPeriod !== undefined && object.retentionPeriod !== null + ? Duration.fromJSON(object.retentionPeriod) + : undefined; + message.snapshotCount = + object.snapshotCount !== undefined && object.snapshotCount !== null + ? Number(object.snapshotCount) + : undefined; + message.snapshotSpec = + object.snapshotSpec !== undefined && object.snapshotSpec !== null + ? SnapshotSpec.fromJSON(object.snapshotSpec) + : undefined; + message.diskIds = (object.diskIds ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: CreateSnapshotScheduleRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.schedulePolicy !== undefined && + (obj.schedulePolicy = message.schedulePolicy + ? SchedulePolicy.toJSON(message.schedulePolicy) + : undefined); + message.retentionPeriod !== undefined && + (obj.retentionPeriod = message.retentionPeriod + ? Duration.toJSON(message.retentionPeriod) + : undefined); + message.snapshotCount !== undefined && + (obj.snapshotCount = Math.round(message.snapshotCount)); + message.snapshotSpec !== undefined && + (obj.snapshotSpec = message.snapshotSpec + ? SnapshotSpec.toJSON(message.snapshotSpec) + : undefined); + if (message.diskIds) { + obj.diskIds = message.diskIds.map((e) => e); + } else { + obj.diskIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateSnapshotScheduleRequest { + const message = { + ...baseCreateSnapshotScheduleRequest, + } as CreateSnapshotScheduleRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.schedulePolicy = + object.schedulePolicy !== undefined && object.schedulePolicy !== null + ? SchedulePolicy.fromPartial(object.schedulePolicy) + : undefined; + message.retentionPeriod = + object.retentionPeriod !== undefined && object.retentionPeriod !== null + ? Duration.fromPartial(object.retentionPeriod) + : undefined; + message.snapshotCount = object.snapshotCount ?? undefined; + message.snapshotSpec = + object.snapshotSpec !== undefined && object.snapshotSpec !== null + ? 
SnapshotSpec.fromPartial(object.snapshotSpec) + : undefined; + message.diskIds = object.diskIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + CreateSnapshotScheduleRequest.$type, + CreateSnapshotScheduleRequest +); + +const baseCreateSnapshotScheduleRequest_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateSnapshotScheduleRequest_LabelsEntry = { + $type: + "yandex.cloud.compute.v1.CreateSnapshotScheduleRequest.LabelsEntry" as const, + + encode( + message: CreateSnapshotScheduleRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateSnapshotScheduleRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateSnapshotScheduleRequest_LabelsEntry, + } as CreateSnapshotScheduleRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateSnapshotScheduleRequest_LabelsEntry { + const message = { + ...baseCreateSnapshotScheduleRequest_LabelsEntry, + } as CreateSnapshotScheduleRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateSnapshotScheduleRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateSnapshotScheduleRequest_LabelsEntry { + const message = { + ...baseCreateSnapshotScheduleRequest_LabelsEntry, + } as CreateSnapshotScheduleRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateSnapshotScheduleRequest_LabelsEntry.$type, + CreateSnapshotScheduleRequest_LabelsEntry +); + +const baseCreateSnapshotScheduleMetadata: object = { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleMetadata", + snapshotScheduleId: "", +}; + +export const CreateSnapshotScheduleMetadata = { + $type: "yandex.cloud.compute.v1.CreateSnapshotScheduleMetadata" as const, + + encode( + message: CreateSnapshotScheduleMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateSnapshotScheduleMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateSnapshotScheduleMetadata, + } as CreateSnapshotScheduleMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateSnapshotScheduleMetadata { + const message = { + ...baseCreateSnapshotScheduleMetadata, + } as CreateSnapshotScheduleMetadata; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: CreateSnapshotScheduleMetadata): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateSnapshotScheduleMetadata { + const message = { + ...baseCreateSnapshotScheduleMetadata, + } as CreateSnapshotScheduleMetadata; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateSnapshotScheduleMetadata.$type, + CreateSnapshotScheduleMetadata +); + +const baseUpdateSnapshotScheduleRequest: object = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest", + snapshotScheduleId: "", + name: "", + description: "", +}; + +export const UpdateSnapshotScheduleRequest = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest" as const, + + encode( + message: UpdateSnapshotScheduleRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateSnapshotScheduleRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.schedulePolicy !== undefined) { + SchedulePolicy.encode( + message.schedulePolicy, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.retentionPeriod !== undefined) { + Duration.encode( + message.retentionPeriod, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.snapshotCount !== undefined) { + writer.uint32(64).int64(message.snapshotCount); + } + if (message.snapshotSpec !== undefined) { + SnapshotSpec.encode( + message.snapshotSpec, + writer.uint32(74).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateSnapshotScheduleRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateSnapshotScheduleRequest, + } as UpdateSnapshotScheduleRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateSnapshotScheduleRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.schedulePolicy = SchedulePolicy.decode( + reader, + reader.uint32() + ); + break; + case 7: + message.retentionPeriod = Duration.decode(reader, reader.uint32()); + break; + case 8: + message.snapshotCount = longToNumber(reader.int64() as Long); + break; + case 9: + message.snapshotSpec = SnapshotSpec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSnapshotScheduleRequest { + const message = { + ...baseUpdateSnapshotScheduleRequest, + } as UpdateSnapshotScheduleRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.schedulePolicy = + object.schedulePolicy !== undefined && object.schedulePolicy !== null + ? SchedulePolicy.fromJSON(object.schedulePolicy) + : undefined; + message.retentionPeriod = + object.retentionPeriod !== undefined && object.retentionPeriod !== null + ? Duration.fromJSON(object.retentionPeriod) + : undefined; + message.snapshotCount = + object.snapshotCount !== undefined && object.snapshotCount !== null + ? Number(object.snapshotCount) + : undefined; + message.snapshotSpec = + object.snapshotSpec !== undefined && object.snapshotSpec !== null + ? SnapshotSpec.fromJSON(object.snapshotSpec) + : undefined; + return message; + }, + + toJSON(message: UpdateSnapshotScheduleRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.schedulePolicy !== undefined && + (obj.schedulePolicy = message.schedulePolicy + ? SchedulePolicy.toJSON(message.schedulePolicy) + : undefined); + message.retentionPeriod !== undefined && + (obj.retentionPeriod = message.retentionPeriod + ? 
Duration.toJSON(message.retentionPeriod) + : undefined); + message.snapshotCount !== undefined && + (obj.snapshotCount = Math.round(message.snapshotCount)); + message.snapshotSpec !== undefined && + (obj.snapshotSpec = message.snapshotSpec + ? SnapshotSpec.toJSON(message.snapshotSpec) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateSnapshotScheduleRequest { + const message = { + ...baseUpdateSnapshotScheduleRequest, + } as UpdateSnapshotScheduleRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.schedulePolicy = + object.schedulePolicy !== undefined && object.schedulePolicy !== null + ? SchedulePolicy.fromPartial(object.schedulePolicy) + : undefined; + message.retentionPeriod = + object.retentionPeriod !== undefined && object.retentionPeriod !== null + ? Duration.fromPartial(object.retentionPeriod) + : undefined; + message.snapshotCount = object.snapshotCount ?? undefined; + message.snapshotSpec = + object.snapshotSpec !== undefined && object.snapshotSpec !== null + ? SnapshotSpec.fromPartial(object.snapshotSpec) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateSnapshotScheduleRequest.$type, + UpdateSnapshotScheduleRequest +); + +const baseUpdateSnapshotScheduleRequest_LabelsEntry: object = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateSnapshotScheduleRequest_LabelsEntry = { + $type: + "yandex.cloud.compute.v1.UpdateSnapshotScheduleRequest.LabelsEntry" as const, + + encode( + message: UpdateSnapshotScheduleRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateSnapshotScheduleRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateSnapshotScheduleRequest_LabelsEntry, + } as UpdateSnapshotScheduleRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSnapshotScheduleRequest_LabelsEntry { + const message = { + ...baseUpdateSnapshotScheduleRequest_LabelsEntry, + } as UpdateSnapshotScheduleRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateSnapshotScheduleRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateSnapshotScheduleRequest_LabelsEntry { + const message = { + ...baseUpdateSnapshotScheduleRequest_LabelsEntry, + } as UpdateSnapshotScheduleRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateSnapshotScheduleRequest_LabelsEntry.$type, + UpdateSnapshotScheduleRequest_LabelsEntry +); + +const baseUpdateSnapshotScheduleMetadata: object = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleMetadata", + snapshotScheduleId: "", +}; + +export const UpdateSnapshotScheduleMetadata = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleMetadata" as const, + + encode( + message: UpdateSnapshotScheduleMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateSnapshotScheduleMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateSnapshotScheduleMetadata, + } as UpdateSnapshotScheduleMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSnapshotScheduleMetadata { + const message = { + ...baseUpdateSnapshotScheduleMetadata, + } as UpdateSnapshotScheduleMetadata; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: UpdateSnapshotScheduleMetadata): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateSnapshotScheduleMetadata { + const message = { + ...baseUpdateSnapshotScheduleMetadata, + } as UpdateSnapshotScheduleMetadata; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateSnapshotScheduleMetadata.$type, + UpdateSnapshotScheduleMetadata +); + +const baseDeleteSnapshotScheduleRequest: object = { + $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleRequest", + snapshotScheduleId: "", +}; + +export const DeleteSnapshotScheduleRequest = { + $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleRequest" as const, + + encode( + message: DeleteSnapshotScheduleRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteSnapshotScheduleRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteSnapshotScheduleRequest, + } as DeleteSnapshotScheduleRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteSnapshotScheduleRequest { + const message = { + ...baseDeleteSnapshotScheduleRequest, + } as DeleteSnapshotScheduleRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: DeleteSnapshotScheduleRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteSnapshotScheduleRequest { + const message = { + ...baseDeleteSnapshotScheduleRequest, + } as DeleteSnapshotScheduleRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteSnapshotScheduleRequest.$type, + DeleteSnapshotScheduleRequest +); + +const baseDeleteSnapshotScheduleMetadata: object = { + $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleMetadata", + snapshotScheduleId: "", +}; + +export const DeleteSnapshotScheduleMetadata = { + $type: "yandex.cloud.compute.v1.DeleteSnapshotScheduleMetadata" as const, + + encode( + message: DeleteSnapshotScheduleMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteSnapshotScheduleMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteSnapshotScheduleMetadata, + } as DeleteSnapshotScheduleMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteSnapshotScheduleMetadata { + const message = { + ...baseDeleteSnapshotScheduleMetadata, + } as DeleteSnapshotScheduleMetadata; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: DeleteSnapshotScheduleMetadata): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteSnapshotScheduleMetadata { + const message = { + ...baseDeleteSnapshotScheduleMetadata, + } as DeleteSnapshotScheduleMetadata; + message.snapshotScheduleId = object.snapshotScheduleId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteSnapshotScheduleMetadata.$type, + DeleteSnapshotScheduleMetadata +); + +const baseDisableSnapshotScheduleRequest: object = { + $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleRequest", + snapshotScheduleId: "", +}; + +export const DisableSnapshotScheduleRequest = { + $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleRequest" as const, + + encode( + message: DisableSnapshotScheduleRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DisableSnapshotScheduleRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDisableSnapshotScheduleRequest, + } as DisableSnapshotScheduleRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DisableSnapshotScheduleRequest { + const message = { + ...baseDisableSnapshotScheduleRequest, + } as DisableSnapshotScheduleRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: DisableSnapshotScheduleRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DisableSnapshotScheduleRequest { + const message = { + ...baseDisableSnapshotScheduleRequest, + } as DisableSnapshotScheduleRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DisableSnapshotScheduleRequest.$type, + DisableSnapshotScheduleRequest +); + +const baseDisableSnapshotScheduleMetadata: object = { + $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleMetadata", + snapshotScheduleId: "", +}; + +export const DisableSnapshotScheduleMetadata = { + $type: "yandex.cloud.compute.v1.DisableSnapshotScheduleMetadata" as const, + + encode( + message: DisableSnapshotScheduleMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DisableSnapshotScheduleMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDisableSnapshotScheduleMetadata, + } as DisableSnapshotScheduleMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DisableSnapshotScheduleMetadata { + const message = { + ...baseDisableSnapshotScheduleMetadata, + } as DisableSnapshotScheduleMetadata; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? 
String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: DisableSnapshotScheduleMetadata): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DisableSnapshotScheduleMetadata { + const message = { + ...baseDisableSnapshotScheduleMetadata, + } as DisableSnapshotScheduleMetadata; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DisableSnapshotScheduleMetadata.$type, + DisableSnapshotScheduleMetadata +); + +const baseEnableSnapshotScheduleRequest: object = { + $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleRequest", + snapshotScheduleId: "", +}; + +export const EnableSnapshotScheduleRequest = { + $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleRequest" as const, + + encode( + message: EnableSnapshotScheduleRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): EnableSnapshotScheduleRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseEnableSnapshotScheduleRequest, + } as EnableSnapshotScheduleRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnableSnapshotScheduleRequest { + const message = { + ...baseEnableSnapshotScheduleRequest, + } as EnableSnapshotScheduleRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: EnableSnapshotScheduleRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): EnableSnapshotScheduleRequest { + const message = { + ...baseEnableSnapshotScheduleRequest, + } as EnableSnapshotScheduleRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + EnableSnapshotScheduleRequest.$type, + EnableSnapshotScheduleRequest +); + +const baseEnableSnapshotScheduleMetadata: object = { + $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleMetadata", + snapshotScheduleId: "", +}; + +export const EnableSnapshotScheduleMetadata = { + $type: "yandex.cloud.compute.v1.EnableSnapshotScheduleMetadata" as const, + + encode( + message: EnableSnapshotScheduleMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): EnableSnapshotScheduleMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseEnableSnapshotScheduleMetadata, + } as EnableSnapshotScheduleMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EnableSnapshotScheduleMetadata { + const message = { + ...baseEnableSnapshotScheduleMetadata, + } as EnableSnapshotScheduleMetadata; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: EnableSnapshotScheduleMetadata): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial, I>>( + object: I + ): EnableSnapshotScheduleMetadata { + const message = { + ...baseEnableSnapshotScheduleMetadata, + } as EnableSnapshotScheduleMetadata; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + EnableSnapshotScheduleMetadata.$type, + EnableSnapshotScheduleMetadata +); + +const baseListSnapshotScheduleOperationsRequest: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsRequest", + snapshotScheduleId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListSnapshotScheduleOperationsRequest = { + $type: + "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsRequest" as const, + + encode( + message: ListSnapshotScheduleOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotScheduleOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotScheduleOperationsRequest, + } as ListSnapshotScheduleOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotScheduleOperationsRequest { + const message = { + ...baseListSnapshotScheduleOperationsRequest, + } as ListSnapshotScheduleOperationsRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotScheduleOperationsRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListSnapshotScheduleOperationsRequest { + const message = { + ...baseListSnapshotScheduleOperationsRequest, + } as ListSnapshotScheduleOperationsRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotScheduleOperationsRequest.$type, + ListSnapshotScheduleOperationsRequest +); + +const baseListSnapshotScheduleOperationsResponse: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsResponse", + nextPageToken: "", +}; + +export const ListSnapshotScheduleOperationsResponse = { + $type: + "yandex.cloud.compute.v1.ListSnapshotScheduleOperationsResponse" as const, + + encode( + message: ListSnapshotScheduleOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotScheduleOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotScheduleOperationsResponse, + } as ListSnapshotScheduleOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotScheduleOperationsResponse { + const message = { + ...baseListSnapshotScheduleOperationsResponse, + } as ListSnapshotScheduleOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotScheduleOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListSnapshotScheduleOperationsResponse { + const message = { + ...baseListSnapshotScheduleOperationsResponse, + } as ListSnapshotScheduleOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotScheduleOperationsResponse.$type, + ListSnapshotScheduleOperationsResponse +); + +const baseListSnapshotScheduleSnapshotsRequest: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsRequest", + snapshotScheduleId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListSnapshotScheduleSnapshotsRequest = { + $type: + "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsRequest" as const, + + encode( + message: ListSnapshotScheduleSnapshotsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotScheduleSnapshotsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotScheduleSnapshotsRequest, + } as ListSnapshotScheduleSnapshotsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotScheduleSnapshotsRequest { + const message = { + ...baseListSnapshotScheduleSnapshotsRequest, + } as ListSnapshotScheduleSnapshotsRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotScheduleSnapshotsRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListSnapshotScheduleSnapshotsRequest { + const message = { + ...baseListSnapshotScheduleSnapshotsRequest, + } as ListSnapshotScheduleSnapshotsRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotScheduleSnapshotsRequest.$type, + ListSnapshotScheduleSnapshotsRequest +); + +const baseListSnapshotScheduleSnapshotsResponse: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsResponse", + nextPageToken: "", +}; + +export const ListSnapshotScheduleSnapshotsResponse = { + $type: + "yandex.cloud.compute.v1.ListSnapshotScheduleSnapshotsResponse" as const, + + encode( + message: ListSnapshotScheduleSnapshotsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.snapshots) { + Snapshot.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotScheduleSnapshotsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotScheduleSnapshotsResponse, + } as ListSnapshotScheduleSnapshotsResponse; + message.snapshots = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshots.push(Snapshot.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotScheduleSnapshotsResponse { + const message = { + ...baseListSnapshotScheduleSnapshotsResponse, + } as ListSnapshotScheduleSnapshotsResponse; + message.snapshots = (object.snapshots ?? []).map((e: any) => + Snapshot.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotScheduleSnapshotsResponse): unknown { + const obj: any = {}; + if (message.snapshots) { + obj.snapshots = message.snapshots.map((e) => + e ? Snapshot.toJSON(e) : undefined + ); + } else { + obj.snapshots = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListSnapshotScheduleSnapshotsResponse { + const message = { + ...baseListSnapshotScheduleSnapshotsResponse, + } as ListSnapshotScheduleSnapshotsResponse; + message.snapshots = + object.snapshots?.map((e) => Snapshot.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotScheduleSnapshotsResponse.$type, + ListSnapshotScheduleSnapshotsResponse +); + +const baseListSnapshotScheduleDisksRequest: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksRequest", + snapshotScheduleId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListSnapshotScheduleDisksRequest = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksRequest" as const, + + encode( + message: ListSnapshotScheduleDisksRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotScheduleDisksRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotScheduleDisksRequest, + } as ListSnapshotScheduleDisksRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotScheduleDisksRequest { + const message = { + ...baseListSnapshotScheduleDisksRequest, + } as ListSnapshotScheduleDisksRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotScheduleDisksRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListSnapshotScheduleDisksRequest { + const message = { + ...baseListSnapshotScheduleDisksRequest, + } as ListSnapshotScheduleDisksRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotScheduleDisksRequest.$type, + ListSnapshotScheduleDisksRequest +); + +const baseListSnapshotScheduleDisksResponse: object = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksResponse", + nextPageToken: "", +}; + +export const ListSnapshotScheduleDisksResponse = { + $type: "yandex.cloud.compute.v1.ListSnapshotScheduleDisksResponse" as const, + + encode( + message: ListSnapshotScheduleDisksResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.disks) { + Disk.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListSnapshotScheduleDisksResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListSnapshotScheduleDisksResponse, + } as ListSnapshotScheduleDisksResponse; + message.disks = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.disks.push(Disk.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListSnapshotScheduleDisksResponse { + const message = { + ...baseListSnapshotScheduleDisksResponse, + } as ListSnapshotScheduleDisksResponse; + message.disks = (object.disks ?? []).map((e: any) => Disk.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListSnapshotScheduleDisksResponse): unknown { + const obj: any = {}; + if (message.disks) { + obj.disks = message.disks.map((e) => (e ? Disk.toJSON(e) : undefined)); + } else { + obj.disks = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ListSnapshotScheduleDisksResponse { + const message = { + ...baseListSnapshotScheduleDisksResponse, + } as ListSnapshotScheduleDisksResponse; + message.disks = object.disks?.map((e) => Disk.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListSnapshotScheduleDisksResponse.$type, + ListSnapshotScheduleDisksResponse +); + +const baseUpdateSnapshotScheduleDisksRequest: object = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksRequest", + snapshotScheduleId: "", + remove: "", + add: "", +}; + +export const UpdateSnapshotScheduleDisksRequest = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksRequest" as const, + + encode( + message: UpdateSnapshotScheduleDisksRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + for (const v of message.remove) { + writer.uint32(18).string(v!); + } + for (const v of message.add) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateSnapshotScheduleDisksRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateSnapshotScheduleDisksRequest, + } as UpdateSnapshotScheduleDisksRequest; + message.remove = []; + message.add = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + case 2: + message.remove.push(reader.string()); + break; + case 3: + message.add.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSnapshotScheduleDisksRequest { + const message = { + ...baseUpdateSnapshotScheduleDisksRequest, + } as UpdateSnapshotScheduleDisksRequest; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? String(object.snapshotScheduleId) + : ""; + message.remove = (object.remove ?? []).map((e: any) => String(e)); + message.add = (object.add ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: UpdateSnapshotScheduleDisksRequest): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + if (message.remove) { + obj.remove = message.remove.map((e) => e); + } else { + obj.remove = []; + } + if (message.add) { + obj.add = message.add.map((e) => e); + } else { + obj.add = []; + } + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateSnapshotScheduleDisksRequest { + const message = { + ...baseUpdateSnapshotScheduleDisksRequest, + } as UpdateSnapshotScheduleDisksRequest; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + message.remove = object.remove?.map((e) => e) || []; + message.add = object.add?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateSnapshotScheduleDisksRequest.$type, + UpdateSnapshotScheduleDisksRequest +); + +const baseUpdateSnapshotScheduleDisksMetadata: object = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksMetadata", + snapshotScheduleId: "", +}; + +export const UpdateSnapshotScheduleDisksMetadata = { + $type: "yandex.cloud.compute.v1.UpdateSnapshotScheduleDisksMetadata" as const, + + encode( + message: UpdateSnapshotScheduleDisksMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.snapshotScheduleId !== "") { + writer.uint32(10).string(message.snapshotScheduleId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateSnapshotScheduleDisksMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateSnapshotScheduleDisksMetadata, + } as UpdateSnapshotScheduleDisksMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.snapshotScheduleId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateSnapshotScheduleDisksMetadata { + const message = { + ...baseUpdateSnapshotScheduleDisksMetadata, + } as UpdateSnapshotScheduleDisksMetadata; + message.snapshotScheduleId = + object.snapshotScheduleId !== undefined && + object.snapshotScheduleId !== null + ? 
String(object.snapshotScheduleId) + : ""; + return message; + }, + + toJSON(message: UpdateSnapshotScheduleDisksMetadata): unknown { + const obj: any = {}; + message.snapshotScheduleId !== undefined && + (obj.snapshotScheduleId = message.snapshotScheduleId); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateSnapshotScheduleDisksMetadata { + const message = { + ...baseUpdateSnapshotScheduleDisksMetadata, + } as UpdateSnapshotScheduleDisksMetadata; + message.snapshotScheduleId = object.snapshotScheduleId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateSnapshotScheduleDisksMetadata.$type, + UpdateSnapshotScheduleDisksMetadata +); + +/** A set of methods for managing SnapshotSchedule resources. */ +export const SnapshotScheduleServiceService = { + /** + * Returns the specified SnapshotSchedule resource. + * + * To get the list of available SnapshotSchedule resources, make a [List] request. + */ + get: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetSnapshotScheduleRequest) => + Buffer.from(GetSnapshotScheduleRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetSnapshotScheduleRequest.decode(value), + responseSerialize: (value: SnapshotSchedule) => + Buffer.from(SnapshotSchedule.encode(value).finish()), + responseDeserialize: (value: Buffer) => SnapshotSchedule.decode(value), + }, + /** Retrieves the list of SnapshotSchedule resources in the specified folder. */ + list: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListSnapshotSchedulesRequest) => + Buffer.from(ListSnapshotSchedulesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListSnapshotSchedulesRequest.decode(value), + responseSerialize: (value: ListSnapshotSchedulesResponse) => + Buffer.from(ListSnapshotSchedulesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListSnapshotSchedulesResponse.decode(value), + }, + /** Creates a snapshot schedule in the specified folder. */ + create: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateSnapshotScheduleRequest) => + Buffer.from(CreateSnapshotScheduleRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + CreateSnapshotScheduleRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified snapshot schedule. */ + update: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateSnapshotScheduleRequest) => + Buffer.from(UpdateSnapshotScheduleRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateSnapshotScheduleRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Deletes the specified snapshot schedule. + * + * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule does not delete + * any snapshots previously made by the schedule. You must delete snapshots separately. 
+ */ + delete: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteSnapshotScheduleRequest) => + Buffer.from(DeleteSnapshotScheduleRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteSnapshotScheduleRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** UpdateDisks of schedule */ + updateDisks: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/UpdateDisks", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateSnapshotScheduleDisksRequest) => + Buffer.from(UpdateSnapshotScheduleDisksRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateSnapshotScheduleDisksRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** + * Disable schedule sets status InActive. + * + * When schedule os disabled snapshots will not be created or deleted according to retention policy. + */ + disable: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Disable", + requestStream: false, + responseStream: false, + requestSerialize: (value: DisableSnapshotScheduleRequest) => + Buffer.from(DisableSnapshotScheduleRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DisableSnapshotScheduleRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Enable schedule sets status Active. */ + enable: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/Enable", + requestStream: false, + responseStream: false, + requestSerialize: (value: EnableSnapshotScheduleRequest) => + Buffer.from(EnableSnapshotScheduleRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + EnableSnapshotScheduleRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified snapshot schedule. */ + listOperations: { + path: "/yandex.cloud.compute.v1.SnapshotScheduleService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListSnapshotScheduleOperationsRequest) => + Buffer.from(ListSnapshotScheduleOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListSnapshotScheduleOperationsRequest.decode(value), + responseSerialize: (value: ListSnapshotScheduleOperationsResponse) => + Buffer.from( + ListSnapshotScheduleOperationsResponse.encode(value).finish() + ), + responseDeserialize: (value: Buffer) => + ListSnapshotScheduleOperationsResponse.decode(value), + }, + /** List snapshot created by schedule. 
+   */
+  listSnapshots: {
+    path: "/yandex.cloud.compute.v1.SnapshotScheduleService/ListSnapshots",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: ListSnapshotScheduleSnapshotsRequest) =>
+      Buffer.from(ListSnapshotScheduleSnapshotsRequest.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      ListSnapshotScheduleSnapshotsRequest.decode(value),
+    responseSerialize: (value: ListSnapshotScheduleSnapshotsResponse) =>
+      Buffer.from(ListSnapshotScheduleSnapshotsResponse.encode(value).finish()),
+    responseDeserialize: (value: Buffer) =>
+      ListSnapshotScheduleSnapshotsResponse.decode(value),
+  },
+  /** List disks that belong to schedule. */
+  listDisks: {
+    path: "/yandex.cloud.compute.v1.SnapshotScheduleService/ListDisks",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: ListSnapshotScheduleDisksRequest) =>
+      Buffer.from(ListSnapshotScheduleDisksRequest.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      ListSnapshotScheduleDisksRequest.decode(value),
+    responseSerialize: (value: ListSnapshotScheduleDisksResponse) =>
+      Buffer.from(ListSnapshotScheduleDisksResponse.encode(value).finish()),
+    responseDeserialize: (value: Buffer) =>
+      ListSnapshotScheduleDisksResponse.decode(value),
+  },
+} as const;
+
+export interface SnapshotScheduleServiceServer
+  extends UntypedServiceImplementation {
+  /**
+   * Returns the specified SnapshotSchedule resource.
+   *
+   * To get the list of available SnapshotSchedule resources, make a [List] request.
+   */
+  get: handleUnaryCall<GetSnapshotScheduleRequest, SnapshotSchedule>;
+  /** Retrieves the list of SnapshotSchedule resources in the specified folder. */
+  list: handleUnaryCall<
+    ListSnapshotSchedulesRequest,
+    ListSnapshotSchedulesResponse
+  >;
+  /** Creates a snapshot schedule in the specified folder. */
+  create: handleUnaryCall<CreateSnapshotScheduleRequest, Operation>;
+  /** Updates the specified snapshot schedule. */
+  update: handleUnaryCall<UpdateSnapshotScheduleRequest, Operation>;
+  /**
+   * Deletes the specified snapshot schedule.
+   *
+   * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule does not delete
+   * any snapshots previously made by the schedule. You must delete snapshots separately.
+   */
+  delete: handleUnaryCall<DeleteSnapshotScheduleRequest, Operation>;
+  /** UpdateDisks of schedule */
+  updateDisks: handleUnaryCall<UpdateSnapshotScheduleDisksRequest, Operation>;
+  /**
+   * Disable schedule sets status InActive.
+   *
+   * When schedule os disabled snapshots will not be created or deleted according to retention policy.
+   */
+  disable: handleUnaryCall<DisableSnapshotScheduleRequest, Operation>;
+  /** Enable schedule sets status Active. */
+  enable: handleUnaryCall<EnableSnapshotScheduleRequest, Operation>;
+  /** Lists operations for the specified snapshot schedule. */
+  listOperations: handleUnaryCall<
+    ListSnapshotScheduleOperationsRequest,
+    ListSnapshotScheduleOperationsResponse
+  >;
+  /** List snapshot created by schedule. */
+  listSnapshots: handleUnaryCall<
+    ListSnapshotScheduleSnapshotsRequest,
+    ListSnapshotScheduleSnapshotsResponse
+  >;
+  /** List disks that belong to schedule. */
+  listDisks: handleUnaryCall<
+    ListSnapshotScheduleDisksRequest,
+    ListSnapshotScheduleDisksResponse
+  >;
+}
+
+export interface SnapshotScheduleServiceClient extends Client {
+  /**
+   * Returns the specified SnapshotSchedule resource.
+   *
+   * To get the list of available SnapshotSchedule resources, make a [List] request.
+   */
+  get(
+    request: GetSnapshotScheduleRequest,
+    callback: (error: ServiceError | null, response: SnapshotSchedule) => void
+  ): ClientUnaryCall;
+  get(
+    request: GetSnapshotScheduleRequest,
+    metadata: Metadata,
+    callback: (error: ServiceError | null, response: SnapshotSchedule) => void
+  ): ClientUnaryCall;
+  get(
+    request: GetSnapshotScheduleRequest,
+    metadata: Metadata,
+    options: Partial<CallOptions>,
+    callback: (error: ServiceError | null, response: SnapshotSchedule) => void
+  ): ClientUnaryCall;
+  /** Retrieves the list of SnapshotSchedule resources in the specified folder. */
+  list(
+    request: ListSnapshotSchedulesRequest,
+    callback: (
+      error: ServiceError | null,
+      response: ListSnapshotSchedulesResponse
+    ) => void
+  ): ClientUnaryCall;
+  list(
+    request: ListSnapshotSchedulesRequest,
+    metadata: Metadata,
+    callback: (
+      error: ServiceError | null,
+      response: ListSnapshotSchedulesResponse
+    ) => void
+  ): ClientUnaryCall;
+  list(
+    request: ListSnapshotSchedulesRequest,
+    metadata: Metadata,
+    options: Partial<CallOptions>,
+    callback: (
+      error: ServiceError | null,
+      response: ListSnapshotSchedulesResponse
+    ) => void
+  ): ClientUnaryCall;
+  /** Creates a snapshot schedule in the specified folder. */
+  create(
+    request: CreateSnapshotScheduleRequest,
+    callback: (error: ServiceError | null, response: Operation) => void
+  ): ClientUnaryCall;
+  create(
+    request: CreateSnapshotScheduleRequest,
+    metadata: Metadata,
+    callback: (error: ServiceError | null, response: Operation) => void
+  ): ClientUnaryCall;
+  create(
+    request: CreateSnapshotScheduleRequest,
+    metadata: Metadata,
+    options: Partial<CallOptions>,
+    callback: (error: ServiceError | null, response: Operation) => void
+  ): ClientUnaryCall;
+  /** Updates the specified snapshot schedule. */
+  update(
+    request: UpdateSnapshotScheduleRequest,
+    callback: (error: ServiceError | null, response: Operation) => void
+  ): ClientUnaryCall;
+  update(
+    request: UpdateSnapshotScheduleRequest,
+    metadata: Metadata,
+    callback: (error: ServiceError | null, response: Operation) => void
+  ): ClientUnaryCall;
+  update(
+    request: UpdateSnapshotScheduleRequest,
+    metadata: Metadata,
+    options: Partial<CallOptions>,
+    callback: (error: ServiceError | null, response: Operation) => void
+  ): ClientUnaryCall;
+  /**
+   * Deletes the specified snapshot schedule.
+   *
+   * Deleting a snapshot schedule removes its data permanently and is irreversible. However, deleting a schedule does not delete
+   * any snapshots previously made by the schedule. You must delete snapshots separately.
+ */ + delete( + request: DeleteSnapshotScheduleRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteSnapshotScheduleRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteSnapshotScheduleRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** UpdateDisks of schedule */ + updateDisks( + request: UpdateSnapshotScheduleDisksRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateDisks( + request: UpdateSnapshotScheduleDisksRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateDisks( + request: UpdateSnapshotScheduleDisksRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** + * Disable schedule sets status InActive. + * + * When schedule os disabled snapshots will not be created or deleted according to retention policy. + */ + disable( + request: DisableSnapshotScheduleRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + disable( + request: DisableSnapshotScheduleRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + disable( + request: DisableSnapshotScheduleRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Enable schedule sets status Active. */ + enable( + request: EnableSnapshotScheduleRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + enable( + request: EnableSnapshotScheduleRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + enable( + request: EnableSnapshotScheduleRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified snapshot schedule. */ + listOperations( + request: ListSnapshotScheduleOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListSnapshotScheduleOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListSnapshotScheduleOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleOperationsResponse + ) => void + ): ClientUnaryCall; + /** List snapshot created by schedule. 
*/ + listSnapshots( + request: ListSnapshotScheduleSnapshotsRequest, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleSnapshotsResponse + ) => void + ): ClientUnaryCall; + listSnapshots( + request: ListSnapshotScheduleSnapshotsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleSnapshotsResponse + ) => void + ): ClientUnaryCall; + listSnapshots( + request: ListSnapshotScheduleSnapshotsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleSnapshotsResponse + ) => void + ): ClientUnaryCall; + /** List disks that belong to schedule. */ + listDisks( + request: ListSnapshotScheduleDisksRequest, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleDisksResponse + ) => void + ): ClientUnaryCall; + listDisks( + request: ListSnapshotScheduleDisksRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleDisksResponse + ) => void + ): ClientUnaryCall; + listDisks( + request: ListSnapshotScheduleDisksRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListSnapshotScheduleDisksResponse + ) => void + ): ClientUnaryCall; +} + +export const SnapshotScheduleServiceClient = makeGenericClientConstructor( + SnapshotScheduleServiceService, + "yandex.cloud.compute.v1.SnapshotScheduleService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): SnapshotScheduleServiceClient; + service: typeof SnapshotScheduleServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts b/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts index 11b240c3..4509eab0 100644 --- a/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts +++ b/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts @@ -18,6 +18,54 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.dataproc.manager.v1"; +export enum InitActsState { + /** INIT_ACTS_STATE_UNSPECIFIED - No init acts on cluster */ + INIT_ACTS_STATE_UNSPECIFIED = 0, + /** FAILED - At least one failed init act */ + FAILED = 1, + /** SUCCESSFUL - All init acts succeeded */ + SUCCESSFUL = 2, + /** IN_PROGRESS - Some init acts not finished */ + IN_PROGRESS = 3, + UNRECOGNIZED = -1, +} + +export function initActsStateFromJSON(object: any): InitActsState { + switch (object) { + case 0: + case "INIT_ACTS_STATE_UNSPECIFIED": + return InitActsState.INIT_ACTS_STATE_UNSPECIFIED; + case 1: + case "FAILED": + return InitActsState.FAILED; + case 2: + case "SUCCESSFUL": + return InitActsState.SUCCESSFUL; + case 3: + case "IN_PROGRESS": + return InitActsState.IN_PROGRESS; + case -1: + case "UNRECOGNIZED": + default: + return InitActsState.UNRECOGNIZED; + } +} + +export function initActsStateToJSON(object: InitActsState): string { + switch (object) { + case InitActsState.INIT_ACTS_STATE_UNSPECIFIED: + return "INIT_ACTS_STATE_UNSPECIFIED"; + case InitActsState.FAILED: + return "FAILED"; + case InitActsState.SUCCESSFUL: + return "SUCCESSFUL"; + case InitActsState.IN_PROGRESS: + return "IN_PROGRESS"; + default: + return "UNKNOWN"; + } +} + export interface HbaseNodeInfo { $type: "yandex.cloud.dataproc.manager.v1.HbaseNodeInfo"; name: string; @@ -107,6 +155,13 @@ export interface LivyInfo { alive: boolean; } +export interface InitActs { + $type: "yandex.cloud.dataproc.manager.v1.InitActs"; + state: InitActsState; + /** fqdns of nodes for error message */ + fqdns: string[]; +} + export interface Info { $type: "yandex.cloud.dataproc.manager.v1.Info"; hdfs?: HDFSInfo; @@ -122,6 +177,7 @@ export interface Info { */ reportCount: number; livy?: LivyInfo; + initActs?: InitActs; } /** The request message containing the host status report. */ @@ -1323,6 +1379,82 @@ export const LivyInfo = { messageTypeRegistry.set(LivyInfo.$type, LivyInfo); +const baseInitActs: object = { + $type: "yandex.cloud.dataproc.manager.v1.InitActs", + state: 0, + fqdns: "", +}; + +export const InitActs = { + $type: "yandex.cloud.dataproc.manager.v1.InitActs" as const, + + encode( + message: InitActs, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.state !== 0) { + writer.uint32(8).int32(message.state); + } + for (const v of message.fqdns) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): InitActs { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseInitActs } as InitActs; + message.fqdns = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.state = reader.int32() as any; + break; + case 2: + message.fqdns.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): InitActs { + const message = { ...baseInitActs } as InitActs; + message.state = + object.state !== undefined && object.state !== null + ? initActsStateFromJSON(object.state) + : 0; + message.fqdns = (object.fqdns ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: InitActs): unknown { + const obj: any = {}; + message.state !== undefined && + (obj.state = initActsStateToJSON(message.state)); + if (message.fqdns) { + obj.fqdns = message.fqdns.map((e) => e); + } else { + obj.fqdns = []; + } + return obj; + }, + + fromPartial, I>>(object: I): InitActs { + const message = { ...baseInitActs } as InitActs; + message.state = object.state ?? 0; + message.fqdns = object.fqdns?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(InitActs.$type, InitActs); + const baseInfo: object = { $type: "yandex.cloud.dataproc.manager.v1.Info", reportCount: 0, @@ -1359,6 +1491,9 @@ export const Info = { if (message.livy !== undefined) { LivyInfo.encode(message.livy, writer.uint32(66).fork()).ldelim(); } + if (message.initActs !== undefined) { + InitActs.encode(message.initActs, writer.uint32(74).fork()).ldelim(); + } return writer; }, @@ -1393,6 +1528,9 @@ export const Info = { case 8: message.livy = LivyInfo.decode(reader, reader.uint32()); break; + case 9: + message.initActs = InitActs.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1435,6 +1573,10 @@ export const Info = { object.livy !== undefined && object.livy !== null ? LivyInfo.fromJSON(object.livy) : undefined; + message.initActs = + object.initActs !== undefined && object.initActs !== null + ? InitActs.fromJSON(object.initActs) + : undefined; return message; }, @@ -1458,6 +1600,10 @@ export const Info = { (obj.reportCount = Math.round(message.reportCount)); message.livy !== undefined && (obj.livy = message.livy ? LivyInfo.toJSON(message.livy) : undefined); + message.initActs !== undefined && + (obj.initActs = message.initActs + ? InitActs.toJSON(message.initActs) + : undefined); return obj; }, @@ -1492,6 +1638,10 @@ export const Info = { object.livy !== undefined && object.livy !== null ? LivyInfo.fromPartial(object.livy) : undefined; + message.initActs = + object.initActs !== undefined && object.initActs !== null + ? InitActs.fromPartial(object.initActs) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/dataproc/v1/subcluster.ts b/src/generated/yandex/cloud/dataproc/v1/subcluster.ts index d493c53a..4fa5d30b 100644 --- a/src/generated/yandex/cloud/dataproc/v1/subcluster.ts +++ b/src/generated/yandex/cloud/dataproc/v1/subcluster.ts @@ -147,8 +147,8 @@ export interface Host { $type: "yandex.cloud.dataproc.v1.Host"; /** * Name of the Data Proc host. The host name is assigned by Data Proc at creation time - * and cannot be changed. The name is generated to be unique across all existing Data Proc - * hosts in Yandex Cloud, as it defines the FQDN of the host. + * and cannot be changed. The name is generated to be unique across all Data Proc + * hosts that exist on the platform, as it defines the FQDN of the host. 
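
A short sketch of how the new `initActs` block plugs into a host report, built with the generated `fromPartial` helpers; the field names come from the `Info` and `InitActs` messages above, while the import path and the FQDN value are placeholders:

import { Info, InitActsState } from "./manager_service"; // assumed relative path

const report = Info.fromPartial({
  reportCount: 1,
  initActs: {
    state: InitActsState.IN_PROGRESS,
    fqdns: ["rc1a-dataproc-m-example.mdb.yandexcloud.net"], // placeholder FQDN
  },
});

// Round-trips through the same binary codec the service uses on the wire.
const bytes = Info.encode(report).finish();
console.log(Info.decode(bytes).initActs?.state); // 3 (IN_PROGRESS)
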
*/ name: string; /** ID of the Data Proc subcluster that the host belongs to. */ diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts index 204d3fb1..39751b0c 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts @@ -43,7 +43,7 @@ export interface MysqlConnection { /** * Managed cluster * - * Yandex.Cloud Managed MySQL cluster ID + * Managed Service for MySQL cluster ID */ mdbClusterId: string | undefined; /** diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts index 4736b8a5..044424d6 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts @@ -149,7 +149,7 @@ export interface PostgresConnection { /** * Managed cluster * - * Yandex.Cloud Managed PostgreSQL cluster ID + * Managed Service for PostgreSQL cluster ID */ mdbClusterId: string | undefined; /** diff --git a/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts b/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts index d5fbe1e1..73c8f110 100644 --- a/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts +++ b/src/generated/yandex/cloud/dns/v1/dns_zone_service.ts @@ -224,13 +224,16 @@ export interface ListDnsZoneRecordSetsRequest { */ pageToken: string; /** - * A filter expression that filters record sets listed in the response. + * A filter expression that filters record sets listed in the response. The expression consists of one or more conditions united by `AND` operator: ` [AND [<...> AND ]]`. * - * The expression must specify: - * 1. The field name. Currently you can use filtering only on the [RecordSet.name] and [RecordSet.type] fields. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. - * Example of a filter: `name=my-record-set`. + * Each condition has the form ` `, where: + * 1. `` is the field name. Currently you can use filtering only on the [RecordSet.name] and [RecordSet.type] fields. + * 2. `` is a logical operator, one of `=`, `!=`, `IN`, `NOT IN`. + * 3. `` represents a value. + * 3.1. In case of single value condition (`=` or `!=`), the value is a string in double (`"`) or single (`'`) quotes. C-style escape sequences are supported (`\"` turns to `"`, `\'` to `'`, `\\` to backslash). + * 3.2. In case of a list of values condition (`IN` or `NOT IN`), the value is `(, , .., )`, where `` is a string in double (`"`) or single (`'`) quotes. + * + * Examples of a filter: `name="my-record-set"`, `type IN ("MX","A") AND name="works.on.my.machine."`. 
*/ filter: string; } @@ -287,6 +290,14 @@ export interface UpsertRecordSetsMetadata { $type: "yandex.cloud.dns.v1.UpsertRecordSetsMetadata"; } +export interface RecordSetDiff { + $type: "yandex.cloud.dns.v1.RecordSetDiff"; + /** List of record sets that were added */ + additions: RecordSet[]; + /** List of record sets that were deleted */ + deletions: RecordSet[]; +} + export interface ListDnsZoneOperationsRequest { $type: "yandex.cloud.dns.v1.ListDnsZoneOperationsRequest"; /** @@ -2072,6 +2083,93 @@ messageTypeRegistry.set( UpsertRecordSetsMetadata ); +const baseRecordSetDiff: object = { + $type: "yandex.cloud.dns.v1.RecordSetDiff", +}; + +export const RecordSetDiff = { + $type: "yandex.cloud.dns.v1.RecordSetDiff" as const, + + encode( + message: RecordSetDiff, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.additions) { + RecordSet.encode(v!, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.deletions) { + RecordSet.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RecordSetDiff { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRecordSetDiff } as RecordSetDiff; + message.additions = []; + message.deletions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.additions.push(RecordSet.decode(reader, reader.uint32())); + break; + case 2: + message.deletions.push(RecordSet.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecordSetDiff { + const message = { ...baseRecordSetDiff } as RecordSetDiff; + message.additions = (object.additions ?? []).map((e: any) => + RecordSet.fromJSON(e) + ); + message.deletions = (object.deletions ?? []).map((e: any) => + RecordSet.fromJSON(e) + ); + return message; + }, + + toJSON(message: RecordSetDiff): unknown { + const obj: any = {}; + if (message.additions) { + obj.additions = message.additions.map((e) => + e ? RecordSet.toJSON(e) : undefined + ); + } else { + obj.additions = []; + } + if (message.deletions) { + obj.deletions = message.deletions.map((e) => + e ? 
RecordSet.toJSON(e) : undefined + ); + } else { + obj.deletions = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): RecordSetDiff { + const message = { ...baseRecordSetDiff } as RecordSetDiff; + message.additions = + object.additions?.map((e) => RecordSet.fromPartial(e)) || []; + message.deletions = + object.deletions?.map((e) => RecordSet.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(RecordSetDiff.$type, RecordSetDiff); + const baseListDnsZoneOperationsRequest: object = { $type: "yandex.cloud.dns.v1.ListDnsZoneOperationsRequest", dnsZoneId: "", diff --git a/src/generated/yandex/cloud/index.ts b/src/generated/yandex/cloud/index.ts index 3f8c96cb..c4005edb 100644 --- a/src/generated/yandex/cloud/index.ts +++ b/src/generated/yandex/cloud/index.ts @@ -21,6 +21,7 @@ export * as lockbox from './lockbox/' export * as logging from './logging/' export * as marketplace from './marketplace/' export * as mdb from './mdb/' +export * as monitoring from './monitoring/' export * as oauth from './oauth/' export * as operation from './operation/' export * as organizationmanager from './organizationmanager/' diff --git a/src/generated/yandex/cloud/iot/broker/v1/broker.ts b/src/generated/yandex/cloud/iot/broker/v1/broker.ts new file mode 100644 index 00000000..eaebee40 --- /dev/null +++ b/src/generated/yandex/cloud/iot/broker/v1/broker.ts @@ -0,0 +1,596 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.iot.broker.v1"; + +/** A broker. */ +export interface Broker { + $type: "yandex.cloud.iot.broker.v1.Broker"; + /** ID of the broker. */ + id: string; + /** ID of the folder that the broker belongs to. */ + folderId: string; + /** Creation timestamp. */ + createdAt?: Date; + /** Name of the broker. The name is unique within the folder. */ + name: string; + /** Description of the broker. 0-256 characters long. */ + description: string; + /** Resource labels as `key:value` pairs. Maximum of 64 per resource. */ + labels: { [key: string]: string }; + /** Status of the broker. */ + status: Broker_Status; +} + +export enum Broker_Status { + STATUS_UNSPECIFIED = 0, + /** CREATING - Broker is being created. */ + CREATING = 1, + /** ACTIVE - Broker is ready to use. */ + ACTIVE = 2, + /** DELETING - Broker is being deleted. */ + DELETING = 3, + UNRECOGNIZED = -1, +} + +export function broker_StatusFromJSON(object: any): Broker_Status { + switch (object) { + case 0: + case "STATUS_UNSPECIFIED": + return Broker_Status.STATUS_UNSPECIFIED; + case 1: + case "CREATING": + return Broker_Status.CREATING; + case 2: + case "ACTIVE": + return Broker_Status.ACTIVE; + case 3: + case "DELETING": + return Broker_Status.DELETING; + case -1: + case "UNRECOGNIZED": + default: + return Broker_Status.UNRECOGNIZED; + } +} + +export function broker_StatusToJSON(object: Broker_Status): string { + switch (object) { + case Broker_Status.STATUS_UNSPECIFIED: + return "STATUS_UNSPECIFIED"; + case Broker_Status.CREATING: + return "CREATING"; + case Broker_Status.ACTIVE: + return "ACTIVE"; + case Broker_Status.DELETING: + return "DELETING"; + default: + return "UNKNOWN"; + } +} + +export interface Broker_LabelsEntry { + $type: "yandex.cloud.iot.broker.v1.Broker.LabelsEntry"; + key: string; + value: string; +} + +/** A broker certificate. 
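
The generated enum converters above accept either the wire number or the canonical string name and fall back to `UNRECOGNIZED` for anything else; a quick sketch (import path assumed):

import {
  Broker_Status,
  broker_StatusFromJSON,
  broker_StatusToJSON,
} from "./broker"; // assumed relative path

console.log(broker_StatusFromJSON("ACTIVE") === Broker_Status.ACTIVE); // true
console.log(broker_StatusFromJSON(2) === Broker_Status.ACTIVE); // true: 2 is the wire value
console.log(broker_StatusFromJSON("something-else") === Broker_Status.UNRECOGNIZED); // true
console.log(broker_StatusToJSON(Broker_Status.DELETING)); // "DELETING"
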
*/ +export interface BrokerCertificate { + $type: "yandex.cloud.iot.broker.v1.BrokerCertificate"; + /** ID of the broker that the certificate belongs to. */ + brokerId: string; + /** SHA256 hash of the certificates. */ + fingerprint: string; + /** Public part of the certificate. */ + certificateData: string; + /** Creation timestamp. */ + createdAt?: Date; +} + +/** A broker password. */ +export interface BrokerPassword { + $type: "yandex.cloud.iot.broker.v1.BrokerPassword"; + /** ID of the broker that the password belongs to. */ + brokerId: string; + /** ID of the password. */ + id: string; + /** Creation timestamp. */ + createdAt?: Date; +} + +const baseBroker: object = { + $type: "yandex.cloud.iot.broker.v1.Broker", + id: "", + folderId: "", + name: "", + description: "", + status: 0, +}; + +export const Broker = { + $type: "yandex.cloud.iot.broker.v1.Broker" as const, + + encode( + message: Broker, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Broker_LabelsEntry.encode( + { + $type: "yandex.cloud.iot.broker.v1.Broker.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(50).fork() + ).ldelim(); + }); + if (message.status !== 0) { + writer.uint32(56).int32(message.status); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Broker { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBroker } as Broker; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + case 6: + const entry6 = Broker_LabelsEntry.decode(reader, reader.uint32()); + if (entry6.value !== undefined) { + message.labels[entry6.key] = entry6.value; + } + break; + case 7: + message.status = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Broker { + const message = { ...baseBroker } as Broker; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.status = + object.status !== undefined && object.status !== null + ? broker_StatusFromJSON(object.status) + : 0; + return message; + }, + + toJSON(message: Broker): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.status !== undefined && + (obj.status = broker_StatusToJSON(message.status)); + return obj; + }, + + fromPartial, I>>(object: I): Broker { + const message = { ...baseBroker } as Broker; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.status = object.status ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Broker.$type, Broker); + +const baseBroker_LabelsEntry: object = { + $type: "yandex.cloud.iot.broker.v1.Broker.LabelsEntry", + key: "", + value: "", +}; + +export const Broker_LabelsEntry = { + $type: "yandex.cloud.iot.broker.v1.Broker.LabelsEntry" as const, + + encode( + message: Broker_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Broker_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBroker_LabelsEntry } as Broker_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Broker_LabelsEntry { + const message = { ...baseBroker_LabelsEntry } as Broker_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Broker_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Broker_LabelsEntry { + const message = { ...baseBroker_LabelsEntry } as Broker_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(Broker_LabelsEntry.$type, Broker_LabelsEntry); + +const baseBrokerCertificate: object = { + $type: "yandex.cloud.iot.broker.v1.BrokerCertificate", + brokerId: "", + fingerprint: "", + certificateData: "", +}; + +export const BrokerCertificate = { + $type: "yandex.cloud.iot.broker.v1.BrokerCertificate" as const, + + encode( + message: BrokerCertificate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.fingerprint !== "") { + writer.uint32(18).string(message.fingerprint); + } + if (message.certificateData !== "") { + writer.uint32(26).string(message.certificateData); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BrokerCertificate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBrokerCertificate } as BrokerCertificate; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.fingerprint = reader.string(); + break; + case 3: + message.certificateData = reader.string(); + break; + case 4: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BrokerCertificate { + const message = { ...baseBrokerCertificate } as BrokerCertificate; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.fingerprint = + object.fingerprint !== undefined && object.fingerprint !== null + ? String(object.fingerprint) + : ""; + message.certificateData = + object.certificateData !== undefined && object.certificateData !== null + ? String(object.certificateData) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + return message; + }, + + toJSON(message: BrokerCertificate): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.fingerprint !== undefined && + (obj.fingerprint = message.fingerprint); + message.certificateData !== undefined && + (obj.certificateData = message.certificateData); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): BrokerCertificate { + const message = { ...baseBrokerCertificate } as BrokerCertificate; + message.brokerId = object.brokerId ?? ""; + message.fingerprint = object.fingerprint ?? ""; + message.certificateData = object.certificateData ?? ""; + message.createdAt = object.createdAt ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(BrokerCertificate.$type, BrokerCertificate); + +const baseBrokerPassword: object = { + $type: "yandex.cloud.iot.broker.v1.BrokerPassword", + brokerId: "", + id: "", +}; + +export const BrokerPassword = { + $type: "yandex.cloud.iot.broker.v1.BrokerPassword" as const, + + encode( + message: BrokerPassword, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.id !== "") { + writer.uint32(18).string(message.id); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BrokerPassword { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBrokerPassword } as BrokerPassword; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.id = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BrokerPassword { + const message = { ...baseBrokerPassword } as BrokerPassword; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + return message; + }, + + toJSON(message: BrokerPassword): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.id !== undefined && (obj.id = message.id); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): BrokerPassword { + const message = { ...baseBrokerPassword } as BrokerPassword; + message.brokerId = object.brokerId ?? ""; + message.id = object.id ?? ""; + message.createdAt = object.createdAt ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(BrokerPassword.$type, BrokerPassword); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts b/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts new file mode 100644 index 00000000..4854b4e6 --- /dev/null +++ b/src/generated/yandex/cloud/iot/broker/v1/broker_data_service.ts @@ -0,0 +1,325 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.iot.broker.v1"; + +export interface PublishBrokerDataRequest { + $type: "yandex.cloud.iot.broker.v1.PublishBrokerDataRequest"; + /** ID of broker publishing message */ + brokerId: string; + /** Topic where message should be published */ + topic: string; + /** Content of the message */ + data: Buffer; +} + +export interface PublishBrokerDataResponse { + $type: "yandex.cloud.iot.broker.v1.PublishBrokerDataResponse"; +} + +const basePublishBrokerDataRequest: object = { + $type: "yandex.cloud.iot.broker.v1.PublishBrokerDataRequest", + brokerId: "", + topic: "", +}; + +export const PublishBrokerDataRequest = { + $type: "yandex.cloud.iot.broker.v1.PublishBrokerDataRequest" as const, + + encode( + message: PublishBrokerDataRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.topic !== "") { + writer.uint32(18).string(message.topic); + } + if (message.data.length !== 0) { + writer.uint32(26).bytes(message.data); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PublishBrokerDataRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePublishBrokerDataRequest, + } as PublishBrokerDataRequest; + message.data = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.topic = reader.string(); + break; + case 3: + message.data = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PublishBrokerDataRequest { + const message = { + ...basePublishBrokerDataRequest, + } as PublishBrokerDataRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.topic = + object.topic !== undefined && object.topic !== null + ? 
String(object.topic) + : ""; + message.data = + object.data !== undefined && object.data !== null + ? Buffer.from(bytesFromBase64(object.data)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: PublishBrokerDataRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.topic !== undefined && (obj.topic = message.topic); + message.data !== undefined && + (obj.data = base64FromBytes( + message.data !== undefined ? message.data : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): PublishBrokerDataRequest { + const message = { + ...basePublishBrokerDataRequest, + } as PublishBrokerDataRequest; + message.brokerId = object.brokerId ?? ""; + message.topic = object.topic ?? ""; + message.data = object.data ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set( + PublishBrokerDataRequest.$type, + PublishBrokerDataRequest +); + +const basePublishBrokerDataResponse: object = { + $type: "yandex.cloud.iot.broker.v1.PublishBrokerDataResponse", +}; + +export const PublishBrokerDataResponse = { + $type: "yandex.cloud.iot.broker.v1.PublishBrokerDataResponse" as const, + + encode( + _: PublishBrokerDataResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PublishBrokerDataResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePublishBrokerDataResponse, + } as PublishBrokerDataResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): PublishBrokerDataResponse { + const message = { + ...basePublishBrokerDataResponse, + } as PublishBrokerDataResponse; + return message; + }, + + toJSON(_: PublishBrokerDataResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): PublishBrokerDataResponse { + const message = { + ...basePublishBrokerDataResponse, + } as PublishBrokerDataResponse; + return message; + }, +}; + +messageTypeRegistry.set( + PublishBrokerDataResponse.$type, + PublishBrokerDataResponse +); + +/** A set of methods to work with IoT Core messages on behalf of broker */ +export const BrokerDataServiceService = { + /** Publishes message on behalf of specified registry */ + publish: { + path: "/yandex.cloud.iot.broker.v1.BrokerDataService/Publish", + requestStream: false, + responseStream: false, + requestSerialize: (value: PublishBrokerDataRequest) => + Buffer.from(PublishBrokerDataRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + PublishBrokerDataRequest.decode(value), + responseSerialize: (value: PublishBrokerDataResponse) => + Buffer.from(PublishBrokerDataResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + PublishBrokerDataResponse.decode(value), + }, +} as const; + +export interface BrokerDataServiceServer extends UntypedServiceImplementation { + /** Publishes message on behalf of specified registry */ + publish: handleUnaryCall; +} + +export interface BrokerDataServiceClient extends Client { + /** Publishes message on behalf of specified registry */ + publish( + request: PublishBrokerDataRequest, + callback: ( + error: ServiceError | null, + response: PublishBrokerDataResponse + ) => void + ): ClientUnaryCall; + 
publish( + request: PublishBrokerDataRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: PublishBrokerDataResponse + ) => void + ): ClientUnaryCall; + publish( + request: PublishBrokerDataRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: PublishBrokerDataResponse + ) => void + ): ClientUnaryCall; +} + +export const BrokerDataServiceClient = makeGenericClientConstructor( + BrokerDataServiceService, + "yandex.cloud.iot.broker.v1.BrokerDataService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BrokerDataServiceClient; + service: typeof BrokerDataServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/iot/broker/v1/broker_service.ts b/src/generated/yandex/cloud/iot/broker/v1/broker_service.ts new file mode 100644 index 00000000..8b855981 --- /dev/null +++ b/src/generated/yandex/cloud/iot/broker/v1/broker_service.ts @@ -0,0 +1,3089 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { + Broker, + BrokerCertificate, + BrokerPassword, +} from "../../../../../yandex/cloud/iot/broker/v1/broker"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.iot.broker.v1"; + +export interface GetBrokerRequest { + $type: "yandex.cloud.iot.broker.v1.GetBrokerRequest"; + /** + * ID of the broker to return. + * + * To get a broker ID make a [BrokerService.List] request. 
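
For orientation, a minimal sketch of publishing through the `BrokerDataServiceClient` defined above; the endpoint, the auth header and the placeholder IDs are assumptions, only the request field names come from this diff:

import { ChannelCredentials, Metadata } from "@grpc/grpc-js";
import {
  BrokerDataServiceClient,
  PublishBrokerDataRequest,
} from "./broker_data_service";

const client = new BrokerDataServiceClient(
  "iot-data.api.cloud.yandex.net:443", // assumed endpoint
  ChannelCredentials.createSsl()
);

const metadata = new Metadata();
metadata.set("authorization", `Bearer ${process.env.IAM_TOKEN}`); // assumed auth scheme

client.publish(
  PublishBrokerDataRequest.fromPartial({
    brokerId: "arenak5ciqss", // placeholder broker ID
    topic: "my-broker/events",
    data: Buffer.from(JSON.stringify({ temperature: 21.5 })),
  }),
  metadata,
  (error) => {
    if (error) throw error;
  }
);
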
+ */ + brokerId: string; +} + +export interface ListBrokersRequest { + $type: "yandex.cloud.iot.broker.v1.ListBrokersRequest"; + /** + * ID of the folder to list brokers in. + * + * To get a folder ID make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a [ListBrokersResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListBrokersResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListBrokersResponse { + $type: "yandex.cloud.iot.broker.v1.ListBrokersResponse"; + /** List of brokers. */ + brokers: Broker[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListBrokersRequest.page_size], use `next_page_token` as the value + * for the [ListBrokersRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface CreateBrokerRequest { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest"; + /** + * ID of the folder to create a broker in. + * + * To get a folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + folderId: string; + /** Name of the broker. The name must be unique within the folder. */ + name: string; + /** Description of the broker. */ + description: string; + /** Resource labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** Broker certificates. */ + certificates: CreateBrokerRequest_Certificate[]; + /** + * Broker passwords. + * + * The password must contain at least three character categories among the following: upper case latin, lower case latin, numbers and special symbols. + */ + password: string; +} + +export interface CreateBrokerRequest_LabelsEntry { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.LabelsEntry"; + key: string; + value: string; +} + +/** Specification of a broker certificate. */ +export interface CreateBrokerRequest_Certificate { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.Certificate"; + /** Public part of the broker certificate. */ + certificateData: string; +} + +export interface CreateBrokerMetadata { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerMetadata"; + /** ID of the broker that is being created. */ + brokerId: string; +} + +export interface UpdateBrokerRequest { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest"; + /** + * ID of the broker to update. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; + /** Field mask that specifies which fields of the broker are going to be updated. */ + updateMask?: FieldMask; + /** Name of the broker. The name must be unique within the folder. */ + name: string; + /** Description of the broker. */ + description: string; + /** + * Resource labels as `key:value` pairs. + * + * Existing set of `labels` is completely replaced by the provided set. 
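
The page-token contract described for `ListBrokersRequest` / `ListBrokersResponse` above can be driven in a loop; a sketch, assuming this module also exports a `BrokerServiceClient` whose `list` method follows the same `(request, metadata, callback)` overload pattern as the other generated clients:

import { Metadata } from "@grpc/grpc-js";
import { Broker } from "./broker"; // assumed relative path
import {
  BrokerServiceClient, // assumed export of this module
  ListBrokersRequest,
  ListBrokersResponse,
} from "./broker_service";

async function listAllBrokers(
  client: BrokerServiceClient,
  metadata: Metadata,
  folderId: string
): Promise<Broker[]> {
  const brokers: Broker[] = [];
  let pageToken = "";
  do {
    const response = await new Promise<ListBrokersResponse>((resolve, reject) => {
      client.list(
        ListBrokersRequest.fromPartial({ folderId, pageSize: 100, pageToken }),
        metadata,
        (error, result) => (error ? reject(error) : resolve(result))
      );
    });
    brokers.push(...response.brokers);
    pageToken = response.nextPageToken;
  } while (pageToken !== "");
  return brokers;
}
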
+ */ + labels: { [key: string]: string }; +} + +export interface UpdateBrokerRequest_LabelsEntry { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateBrokerMetadata { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerMetadata"; + /** ID of the broker that is being updated. */ + brokerId: string; +} + +export interface DeleteBrokerRequest { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerRequest"; + /** + * ID of the broker to delete. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; +} + +export interface DeleteBrokerMetadata { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerMetadata"; + /** ID of the broker that is being deleted. */ + brokerId: string; +} + +export interface ListBrokerCertificatesRequest { + $type: "yandex.cloud.iot.broker.v1.ListBrokerCertificatesRequest"; + /** ID of the broker to list certificates for. */ + brokerId: string; +} + +export interface ListBrokerCertificatesResponse { + $type: "yandex.cloud.iot.broker.v1.ListBrokerCertificatesResponse"; + /** List of certificates for the specified broker. */ + certificates: BrokerCertificate[]; +} + +export interface AddBrokerCertificateRequest { + $type: "yandex.cloud.iot.broker.v1.AddBrokerCertificateRequest"; + /** + * ID of the broker for which the certificate is being added. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; + /** Public part of the certificate that is being added. */ + certificateData: string; +} + +export interface AddBrokerCertificateMetadata { + $type: "yandex.cloud.iot.broker.v1.AddBrokerCertificateMetadata"; + /** ID of the broker certificate that is being added. */ + brokerId: string; + /** Fingerprint of the certificate that is being added. */ + fingerprint: string; +} + +export interface DeleteBrokerCertificateRequest { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerCertificateRequest"; + /** + * ID of the broker to delete a certificate for. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; + /** Fingerprint of the certificate that is being deleted. */ + fingerprint: string; +} + +export interface DeleteBrokerCertificateMetadata { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerCertificateMetadata"; + /** ID of a broker for which the certificate is being delete. */ + brokerId: string; + /** Fingerprint of the certificate to deleted. */ + fingerprint: string; +} + +export interface ListBrokerPasswordsRequest { + $type: "yandex.cloud.iot.broker.v1.ListBrokerPasswordsRequest"; + /** + * ID of the broker to list passwords in. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; +} + +export interface ListBrokerPasswordsResponse { + $type: "yandex.cloud.iot.broker.v1.ListBrokerPasswordsResponse"; + /** List of passwords for the specified broker. */ + passwords: BrokerPassword[]; +} + +export interface AddBrokerPasswordRequest { + $type: "yandex.cloud.iot.broker.v1.AddBrokerPasswordRequest"; + /** + * ID of the broker to add a password for. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; + /** + * Passwords for the broker. + * + * The password must contain at least three character categories among the following: upper case latin, lower case latin, numbers and special symbols. 
+ */ + password: string; +} + +export interface AddBrokerPasswordMetadata { + $type: "yandex.cloud.iot.broker.v1.AddBrokerPasswordMetadata"; + /** ID of the broker for which the password is being added. */ + brokerId: string; + /** ID of a password that is being added. */ + passwordId: string; +} + +export interface DeleteBrokerPasswordRequest { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerPasswordRequest"; + /** + * ID of the broker to delete a password for. + * + * To get a broker ID make a [BrokerService.List] request. + */ + brokerId: string; + /** + * ID of the password to delete. + * + * To get a password ID make a [BrokerService.ListPasswords] request. + */ + passwordId: string; +} + +export interface DeleteBrokerPasswordMetadata { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerPasswordMetadata"; + /** ID of a broker for which the password is being delete. */ + brokerId: string; + /** + * ID of the password to delete. + * + * To get a password ID make a [BrokerService.ListPasswords] request. + */ + passwordId: string; +} + +export interface ListBrokerOperationsRequest { + $type: "yandex.cloud.iot.broker.v1.ListBrokerOperationsRequest"; + /** ID of the broker to list operations for. */ + brokerId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a [ListBrokerOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListBrokerOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * Currently you can use filtering only on [Broker.name] field. + */ + filter: string; +} + +export interface ListBrokerOperationsResponse { + $type: "yandex.cloud.iot.broker.v1.ListBrokerOperationsResponse"; + /** List of operations for the specified broker. */ + operations: Operation[]; + /** + * Token for getting the next page of the list. If the number of results is greater than + * the specified [ListBrokerOperationsRequest.page_size], use `next_page_token` as the value + * for the [ListBrokerOperationsRequest.page_token] parameter in the next list request. + * + * Each subsequent page will have its own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetBrokerRequest: object = { + $type: "yandex.cloud.iot.broker.v1.GetBrokerRequest", + brokerId: "", +}; + +export const GetBrokerRequest = { + $type: "yandex.cloud.iot.broker.v1.GetBrokerRequest" as const, + + encode( + message: GetBrokerRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBrokerRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetBrokerRequest } as GetBrokerRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBrokerRequest { + const message = { ...baseGetBrokerRequest } as GetBrokerRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: GetBrokerRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBrokerRequest { + const message = { ...baseGetBrokerRequest } as GetBrokerRequest; + message.brokerId = object.brokerId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetBrokerRequest.$type, GetBrokerRequest); + +const baseListBrokersRequest: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokersRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListBrokersRequest = { + $type: "yandex.cloud.iot.broker.v1.ListBrokersRequest" as const, + + encode( + message: ListBrokersRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBrokersRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBrokersRequest } as ListBrokersRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokersRequest { + const message = { ...baseListBrokersRequest } as ListBrokersRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListBrokersRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokersRequest { + const message = { ...baseListBrokersRequest } as ListBrokersRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListBrokersRequest.$type, ListBrokersRequest); + +const baseListBrokersResponse: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokersResponse", + nextPageToken: "", +}; + +export const ListBrokersResponse = { + $type: "yandex.cloud.iot.broker.v1.ListBrokersResponse" as const, + + encode( + message: ListBrokersResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.brokers) { + Broker.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBrokersResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBrokersResponse } as ListBrokersResponse; + message.brokers = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokers.push(Broker.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokersResponse { + const message = { ...baseListBrokersResponse } as ListBrokersResponse; + message.brokers = (object.brokers ?? []).map((e: any) => + Broker.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBrokersResponse): unknown { + const obj: any = {}; + if (message.brokers) { + obj.brokers = message.brokers.map((e) => + e ? Broker.toJSON(e) : undefined + ); + } else { + obj.brokers = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokersResponse { + const message = { ...baseListBrokersResponse } as ListBrokersResponse; + message.brokers = object.brokers?.map((e) => Broker.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListBrokersResponse.$type, ListBrokersResponse); + +const baseCreateBrokerRequest: object = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest", + folderId: "", + name: "", + description: "", + password: "", +}; + +export const CreateBrokerRequest = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest" as const, + + encode( + message: CreateBrokerRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateBrokerRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + for (const v of message.certificates) { + CreateBrokerRequest_Certificate.encode( + v!, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.password !== "") { + writer.uint32(50).string(message.password); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateBrokerRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateBrokerRequest } as CreateBrokerRequest; + message.labels = {}; + message.certificates = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = CreateBrokerRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.certificates.push( + CreateBrokerRequest_Certificate.decode(reader, reader.uint32()) + ); + break; + case 6: + message.password = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateBrokerRequest { + const message = { ...baseCreateBrokerRequest } as CreateBrokerRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.certificates = (object.certificates ?? []).map((e: any) => + CreateBrokerRequest_Certificate.fromJSON(e) + ); + message.password = + object.password !== undefined && object.password !== null + ? 
String(object.password) + : ""; + return message; + }, + + toJSON(message: CreateBrokerRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + if (message.certificates) { + obj.certificates = message.certificates.map((e) => + e ? CreateBrokerRequest_Certificate.toJSON(e) : undefined + ); + } else { + obj.certificates = []; + } + message.password !== undefined && (obj.password = message.password); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateBrokerRequest { + const message = { ...baseCreateBrokerRequest } as CreateBrokerRequest; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.certificates = + object.certificates?.map((e) => + CreateBrokerRequest_Certificate.fromPartial(e) + ) || []; + message.password = object.password ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateBrokerRequest.$type, CreateBrokerRequest); + +const baseCreateBrokerRequest_LabelsEntry: object = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateBrokerRequest_LabelsEntry = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.LabelsEntry" as const, + + encode( + message: CreateBrokerRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateBrokerRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateBrokerRequest_LabelsEntry, + } as CreateBrokerRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateBrokerRequest_LabelsEntry { + const message = { + ...baseCreateBrokerRequest_LabelsEntry, + } as CreateBrokerRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateBrokerRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateBrokerRequest_LabelsEntry { + const message = { + ...baseCreateBrokerRequest_LabelsEntry, + } as CreateBrokerRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
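+/*
+ * CreateBrokerRequest.labels is a proto3 map<string, string>. On the wire a
+ * map field is a repeated LabelsEntry { key, value } message, which is exactly
+ * what the LabelsEntry codec above serializes; callers simply pass a plain
+ * object and the generated code expands it, e.g. (illustrative values):
+ *
+ *   CreateBrokerRequest.fromPartial({
+ *     folderId: 'my-folder-id', // hypothetical ID
+ *     name: 'my-broker',
+ *     labels: { env: 'staging', team: 'iot' },
+ *   });
+ */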
""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateBrokerRequest_LabelsEntry.$type, + CreateBrokerRequest_LabelsEntry +); + +const baseCreateBrokerRequest_Certificate: object = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.Certificate", + certificateData: "", +}; + +export const CreateBrokerRequest_Certificate = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerRequest.Certificate" as const, + + encode( + message: CreateBrokerRequest_Certificate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.certificateData !== "") { + writer.uint32(10).string(message.certificateData); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateBrokerRequest_Certificate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateBrokerRequest_Certificate, + } as CreateBrokerRequest_Certificate; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.certificateData = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateBrokerRequest_Certificate { + const message = { + ...baseCreateBrokerRequest_Certificate, + } as CreateBrokerRequest_Certificate; + message.certificateData = + object.certificateData !== undefined && object.certificateData !== null + ? String(object.certificateData) + : ""; + return message; + }, + + toJSON(message: CreateBrokerRequest_Certificate): unknown { + const obj: any = {}; + message.certificateData !== undefined && + (obj.certificateData = message.certificateData); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateBrokerRequest_Certificate { + const message = { + ...baseCreateBrokerRequest_Certificate, + } as CreateBrokerRequest_Certificate; + message.certificateData = object.certificateData ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateBrokerRequest_Certificate.$type, + CreateBrokerRequest_Certificate +); + +const baseCreateBrokerMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerMetadata", + brokerId: "", +}; + +export const CreateBrokerMetadata = { + $type: "yandex.cloud.iot.broker.v1.CreateBrokerMetadata" as const, + + encode( + message: CreateBrokerMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateBrokerMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateBrokerMetadata } as CreateBrokerMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateBrokerMetadata { + const message = { ...baseCreateBrokerMetadata } as CreateBrokerMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? 
String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: CreateBrokerMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateBrokerMetadata { + const message = { ...baseCreateBrokerMetadata } as CreateBrokerMetadata; + message.brokerId = object.brokerId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateBrokerMetadata.$type, CreateBrokerMetadata); + +const baseUpdateBrokerRequest: object = { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest", + brokerId: "", + name: "", + description: "", +}; + +export const UpdateBrokerRequest = { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest" as const, + + encode( + message: UpdateBrokerRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateBrokerRequest_LabelsEntry.encode( + { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateBrokerRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateBrokerRequest } as UpdateBrokerRequest; + message.labels = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = UpdateBrokerRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateBrokerRequest { + const message = { ...baseUpdateBrokerRequest } as UpdateBrokerRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: UpdateBrokerRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? 
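+/*
+ * UpdateBrokerRequest.updateMask is a google.protobuf.FieldMask: the `paths`
+ * it lists name the broker fields the Update call should change, while fields
+ * not listed are left as they are. A partial-update sketch (the surrounding
+ * client call is assumed and not shown here; IDs are placeholders):
+ *
+ *   UpdateBrokerRequest.fromPartial({
+ *     brokerId: 'my-broker-id',                       // hypothetical ID
+ *     updateMask: { paths: ['name', 'description'] }, // only these fields change
+ *     name: 'renamed-broker',
+ *     description: 'broker for the staging stand',
+ *   });
+ */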
FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateBrokerRequest { + const message = { ...baseUpdateBrokerRequest } as UpdateBrokerRequest; + message.brokerId = object.brokerId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(UpdateBrokerRequest.$type, UpdateBrokerRequest); + +const baseUpdateBrokerRequest_LabelsEntry: object = { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateBrokerRequest_LabelsEntry = { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerRequest.LabelsEntry" as const, + + encode( + message: UpdateBrokerRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateBrokerRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateBrokerRequest_LabelsEntry, + } as UpdateBrokerRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateBrokerRequest_LabelsEntry { + const message = { + ...baseUpdateBrokerRequest_LabelsEntry, + } as UpdateBrokerRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateBrokerRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateBrokerRequest_LabelsEntry { + const message = { + ...baseUpdateBrokerRequest_LabelsEntry, + } as UpdateBrokerRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateBrokerRequest_LabelsEntry.$type, + UpdateBrokerRequest_LabelsEntry +); + +const baseUpdateBrokerMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerMetadata", + brokerId: "", +}; + +export const UpdateBrokerMetadata = { + $type: "yandex.cloud.iot.broker.v1.UpdateBrokerMetadata" as const, + + encode( + message: UpdateBrokerMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateBrokerMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateBrokerMetadata } as UpdateBrokerMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateBrokerMetadata { + const message = { ...baseUpdateBrokerMetadata } as UpdateBrokerMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: UpdateBrokerMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateBrokerMetadata { + const message = { ...baseUpdateBrokerMetadata } as UpdateBrokerMetadata; + message.brokerId = object.brokerId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateBrokerMetadata.$type, UpdateBrokerMetadata); + +const baseDeleteBrokerRequest: object = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerRequest", + brokerId: "", +}; + +export const DeleteBrokerRequest = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerRequest" as const, + + encode( + message: DeleteBrokerRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteBrokerRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBrokerRequest } as DeleteBrokerRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBrokerRequest { + const message = { ...baseDeleteBrokerRequest } as DeleteBrokerRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: DeleteBrokerRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBrokerRequest { + const message = { ...baseDeleteBrokerRequest } as DeleteBrokerRequest; + message.brokerId = object.brokerId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBrokerRequest.$type, DeleteBrokerRequest); + +const baseDeleteBrokerMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerMetadata", + brokerId: "", +}; + +export const DeleteBrokerMetadata = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerMetadata" as const, + + encode( + message: DeleteBrokerMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBrokerMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteBrokerMetadata } as DeleteBrokerMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBrokerMetadata { + const message = { ...baseDeleteBrokerMetadata } as DeleteBrokerMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: DeleteBrokerMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBrokerMetadata { + const message = { ...baseDeleteBrokerMetadata } as DeleteBrokerMetadata; + message.brokerId = object.brokerId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteBrokerMetadata.$type, DeleteBrokerMetadata); + +const baseListBrokerCertificatesRequest: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerCertificatesRequest", + brokerId: "", +}; + +export const ListBrokerCertificatesRequest = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerCertificatesRequest" as const, + + encode( + message: ListBrokerCertificatesRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBrokerCertificatesRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListBrokerCertificatesRequest, + } as ListBrokerCertificatesRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokerCertificatesRequest { + const message = { + ...baseListBrokerCertificatesRequest, + } as ListBrokerCertificatesRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: ListBrokerCertificatesRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokerCertificatesRequest { + const message = { + ...baseListBrokerCertificatesRequest, + } as ListBrokerCertificatesRequest; + message.brokerId = object.brokerId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListBrokerCertificatesRequest.$type, + ListBrokerCertificatesRequest +); + +const baseListBrokerCertificatesResponse: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerCertificatesResponse", +}; + +export const ListBrokerCertificatesResponse = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerCertificatesResponse" as const, + + encode( + message: ListBrokerCertificatesResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.certificates) { + BrokerCertificate.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBrokerCertificatesResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListBrokerCertificatesResponse, + } as ListBrokerCertificatesResponse; + message.certificates = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.certificates.push( + BrokerCertificate.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokerCertificatesResponse { + const message = { + ...baseListBrokerCertificatesResponse, + } as ListBrokerCertificatesResponse; + message.certificates = (object.certificates ?? []).map((e: any) => + BrokerCertificate.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListBrokerCertificatesResponse): unknown { + const obj: any = {}; + if (message.certificates) { + obj.certificates = message.certificates.map((e) => + e ? BrokerCertificate.toJSON(e) : undefined + ); + } else { + obj.certificates = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokerCertificatesResponse { + const message = { + ...baseListBrokerCertificatesResponse, + } as ListBrokerCertificatesResponse; + message.certificates = + object.certificates?.map((e) => BrokerCertificate.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ListBrokerCertificatesResponse.$type, + ListBrokerCertificatesResponse +); + +const baseAddBrokerCertificateRequest: object = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerCertificateRequest", + brokerId: "", + certificateData: "", +}; + +export const AddBrokerCertificateRequest = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerCertificateRequest" as const, + + encode( + message: AddBrokerCertificateRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.certificateData !== "") { + writer.uint32(26).string(message.certificateData); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddBrokerCertificateRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseAddBrokerCertificateRequest, + } as AddBrokerCertificateRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 3: + message.certificateData = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddBrokerCertificateRequest { + const message = { + ...baseAddBrokerCertificateRequest, + } as AddBrokerCertificateRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.certificateData = + object.certificateData !== undefined && object.certificateData !== null + ? String(object.certificateData) + : ""; + return message; + }, + + toJSON(message: AddBrokerCertificateRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.certificateData !== undefined && + (obj.certificateData = message.certificateData); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddBrokerCertificateRequest { + const message = { + ...baseAddBrokerCertificateRequest, + } as AddBrokerCertificateRequest; + message.brokerId = object.brokerId ?? ""; + message.certificateData = object.certificateData ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AddBrokerCertificateRequest.$type, + AddBrokerCertificateRequest +); + +const baseAddBrokerCertificateMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerCertificateMetadata", + brokerId: "", + fingerprint: "", +}; + +export const AddBrokerCertificateMetadata = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerCertificateMetadata" as const, + + encode( + message: AddBrokerCertificateMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.fingerprint !== "") { + writer.uint32(18).string(message.fingerprint); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddBrokerCertificateMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddBrokerCertificateMetadata, + } as AddBrokerCertificateMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.fingerprint = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddBrokerCertificateMetadata { + const message = { + ...baseAddBrokerCertificateMetadata, + } as AddBrokerCertificateMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.fingerprint = + object.fingerprint !== undefined && object.fingerprint !== null + ? 
String(object.fingerprint) + : ""; + return message; + }, + + toJSON(message: AddBrokerCertificateMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.fingerprint !== undefined && + (obj.fingerprint = message.fingerprint); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddBrokerCertificateMetadata { + const message = { + ...baseAddBrokerCertificateMetadata, + } as AddBrokerCertificateMetadata; + message.brokerId = object.brokerId ?? ""; + message.fingerprint = object.fingerprint ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AddBrokerCertificateMetadata.$type, + AddBrokerCertificateMetadata +); + +const baseDeleteBrokerCertificateRequest: object = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerCertificateRequest", + brokerId: "", + fingerprint: "", +}; + +export const DeleteBrokerCertificateRequest = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerCertificateRequest" as const, + + encode( + message: DeleteBrokerCertificateRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.fingerprint !== "") { + writer.uint32(18).string(message.fingerprint); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBrokerCertificateRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteBrokerCertificateRequest, + } as DeleteBrokerCertificateRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.fingerprint = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBrokerCertificateRequest { + const message = { + ...baseDeleteBrokerCertificateRequest, + } as DeleteBrokerCertificateRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.fingerprint = + object.fingerprint !== undefined && object.fingerprint !== null + ? String(object.fingerprint) + : ""; + return message; + }, + + toJSON(message: DeleteBrokerCertificateRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.fingerprint !== undefined && + (obj.fingerprint = message.fingerprint); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBrokerCertificateRequest { + const message = { + ...baseDeleteBrokerCertificateRequest, + } as DeleteBrokerCertificateRequest; + message.brokerId = object.brokerId ?? ""; + message.fingerprint = object.fingerprint ?? 
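+/*
+ * Broker certificates are addressed by fingerprint: AddCertificate takes the
+ * certificate body in `certificateData` (typically the PEM-encoded X.509
+ * certificate — an assumption, the format is defined by the service docs),
+ * and the operation metadata reports the fingerprint that DeleteCertificate
+ * later expects. Illustrative request with placeholder values:
+ *
+ *   AddBrokerCertificateRequest.fromPartial({
+ *     brokerId: 'my-broker-id', // hypothetical ID
+ *     certificateData: '-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n',
+ *   });
+ */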
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteBrokerCertificateRequest.$type, + DeleteBrokerCertificateRequest +); + +const baseDeleteBrokerCertificateMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerCertificateMetadata", + brokerId: "", + fingerprint: "", +}; + +export const DeleteBrokerCertificateMetadata = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerCertificateMetadata" as const, + + encode( + message: DeleteBrokerCertificateMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.fingerprint !== "") { + writer.uint32(18).string(message.fingerprint); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBrokerCertificateMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteBrokerCertificateMetadata, + } as DeleteBrokerCertificateMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.fingerprint = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBrokerCertificateMetadata { + const message = { + ...baseDeleteBrokerCertificateMetadata, + } as DeleteBrokerCertificateMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.fingerprint = + object.fingerprint !== undefined && object.fingerprint !== null + ? String(object.fingerprint) + : ""; + return message; + }, + + toJSON(message: DeleteBrokerCertificateMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.fingerprint !== undefined && + (obj.fingerprint = message.fingerprint); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBrokerCertificateMetadata { + const message = { + ...baseDeleteBrokerCertificateMetadata, + } as DeleteBrokerCertificateMetadata; + message.brokerId = object.brokerId ?? ""; + message.fingerprint = object.fingerprint ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteBrokerCertificateMetadata.$type, + DeleteBrokerCertificateMetadata +); + +const baseListBrokerPasswordsRequest: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerPasswordsRequest", + brokerId: "", +}; + +export const ListBrokerPasswordsRequest = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerPasswordsRequest" as const, + + encode( + message: ListBrokerPasswordsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBrokerPasswordsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListBrokerPasswordsRequest, + } as ListBrokerPasswordsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokerPasswordsRequest { + const message = { + ...baseListBrokerPasswordsRequest, + } as ListBrokerPasswordsRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + return message; + }, + + toJSON(message: ListBrokerPasswordsRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokerPasswordsRequest { + const message = { + ...baseListBrokerPasswordsRequest, + } as ListBrokerPasswordsRequest; + message.brokerId = object.brokerId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListBrokerPasswordsRequest.$type, + ListBrokerPasswordsRequest +); + +const baseListBrokerPasswordsResponse: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerPasswordsResponse", +}; + +export const ListBrokerPasswordsResponse = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerPasswordsResponse" as const, + + encode( + message: ListBrokerPasswordsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.passwords) { + BrokerPassword.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBrokerPasswordsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListBrokerPasswordsResponse, + } as ListBrokerPasswordsResponse; + message.passwords = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.passwords.push( + BrokerPassword.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokerPasswordsResponse { + const message = { + ...baseListBrokerPasswordsResponse, + } as ListBrokerPasswordsResponse; + message.passwords = (object.passwords ?? []).map((e: any) => + BrokerPassword.fromJSON(e) + ); + return message; + }, + + toJSON(message: ListBrokerPasswordsResponse): unknown { + const obj: any = {}; + if (message.passwords) { + obj.passwords = message.passwords.map((e) => + e ? 
BrokerPassword.toJSON(e) : undefined + ); + } else { + obj.passwords = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokerPasswordsResponse { + const message = { + ...baseListBrokerPasswordsResponse, + } as ListBrokerPasswordsResponse; + message.passwords = + object.passwords?.map((e) => BrokerPassword.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ListBrokerPasswordsResponse.$type, + ListBrokerPasswordsResponse +); + +const baseAddBrokerPasswordRequest: object = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerPasswordRequest", + brokerId: "", + password: "", +}; + +export const AddBrokerPasswordRequest = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerPasswordRequest" as const, + + encode( + message: AddBrokerPasswordRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.password !== "") { + writer.uint32(18).string(message.password); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddBrokerPasswordRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddBrokerPasswordRequest, + } as AddBrokerPasswordRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.password = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddBrokerPasswordRequest { + const message = { + ...baseAddBrokerPasswordRequest, + } as AddBrokerPasswordRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.password = + object.password !== undefined && object.password !== null + ? String(object.password) + : ""; + return message; + }, + + toJSON(message: AddBrokerPasswordRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.password !== undefined && (obj.password = message.password); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddBrokerPasswordRequest { + const message = { + ...baseAddBrokerPasswordRequest, + } as AddBrokerPasswordRequest; + message.brokerId = object.brokerId ?? ""; + message.password = object.password ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AddBrokerPasswordRequest.$type, + AddBrokerPasswordRequest +); + +const baseAddBrokerPasswordMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerPasswordMetadata", + brokerId: "", + passwordId: "", +}; + +export const AddBrokerPasswordMetadata = { + $type: "yandex.cloud.iot.broker.v1.AddBrokerPasswordMetadata" as const, + + encode( + message: AddBrokerPasswordMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.passwordId !== "") { + writer.uint32(18).string(message.passwordId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddBrokerPasswordMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
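+/*
+ * AddPassword is a long-running call: it returns an Operation whose metadata
+ * is AddBrokerPasswordMetadata (the broker ID plus the new password ID).
+ * Illustrative flow, assuming `brokerService` is an already constructed
+ * BrokerService client (not defined in this file) and the values are placeholders:
+ *
+ *   const op = await brokerService.addPassword(AddBrokerPasswordRequest.fromPartial({
+ *     brokerId: 'my-broker-id',
+ *     password: 'Str0ng-Passw0rd',
+ *   }));
+ *   // One way to read the metadata is to decode the Any payload directly:
+ *   const meta = AddBrokerPasswordMetadata.decode(op.metadata!.value);
+ *   // meta.passwordId can later be passed to DeletePassword.
+ */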
reader.len : reader.pos + length; + const message = { + ...baseAddBrokerPasswordMetadata, + } as AddBrokerPasswordMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.passwordId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddBrokerPasswordMetadata { + const message = { + ...baseAddBrokerPasswordMetadata, + } as AddBrokerPasswordMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.passwordId = + object.passwordId !== undefined && object.passwordId !== null + ? String(object.passwordId) + : ""; + return message; + }, + + toJSON(message: AddBrokerPasswordMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.passwordId !== undefined && (obj.passwordId = message.passwordId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddBrokerPasswordMetadata { + const message = { + ...baseAddBrokerPasswordMetadata, + } as AddBrokerPasswordMetadata; + message.brokerId = object.brokerId ?? ""; + message.passwordId = object.passwordId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + AddBrokerPasswordMetadata.$type, + AddBrokerPasswordMetadata +); + +const baseDeleteBrokerPasswordRequest: object = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerPasswordRequest", + brokerId: "", + passwordId: "", +}; + +export const DeleteBrokerPasswordRequest = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerPasswordRequest" as const, + + encode( + message: DeleteBrokerPasswordRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.passwordId !== "") { + writer.uint32(18).string(message.passwordId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBrokerPasswordRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteBrokerPasswordRequest, + } as DeleteBrokerPasswordRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.passwordId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBrokerPasswordRequest { + const message = { + ...baseDeleteBrokerPasswordRequest, + } as DeleteBrokerPasswordRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.passwordId = + object.passwordId !== undefined && object.passwordId !== null + ? String(object.passwordId) + : ""; + return message; + }, + + toJSON(message: DeleteBrokerPasswordRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.passwordId !== undefined && (obj.passwordId = message.passwordId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBrokerPasswordRequest { + const message = { + ...baseDeleteBrokerPasswordRequest, + } as DeleteBrokerPasswordRequest; + message.brokerId = object.brokerId ?? ""; + message.passwordId = object.passwordId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteBrokerPasswordRequest.$type, + DeleteBrokerPasswordRequest +); + +const baseDeleteBrokerPasswordMetadata: object = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerPasswordMetadata", + brokerId: "", + passwordId: "", +}; + +export const DeleteBrokerPasswordMetadata = { + $type: "yandex.cloud.iot.broker.v1.DeleteBrokerPasswordMetadata" as const, + + encode( + message: DeleteBrokerPasswordMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.passwordId !== "") { + writer.uint32(18).string(message.passwordId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteBrokerPasswordMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteBrokerPasswordMetadata, + } as DeleteBrokerPasswordMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.passwordId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteBrokerPasswordMetadata { + const message = { + ...baseDeleteBrokerPasswordMetadata, + } as DeleteBrokerPasswordMetadata; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.passwordId = + object.passwordId !== undefined && object.passwordId !== null + ? String(object.passwordId) + : ""; + return message; + }, + + toJSON(message: DeleteBrokerPasswordMetadata): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.passwordId !== undefined && (obj.passwordId = message.passwordId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteBrokerPasswordMetadata { + const message = { + ...baseDeleteBrokerPasswordMetadata, + } as DeleteBrokerPasswordMetadata; + message.brokerId = object.brokerId ?? ""; + message.passwordId = object.passwordId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeleteBrokerPasswordMetadata.$type, + DeleteBrokerPasswordMetadata +); + +const baseListBrokerOperationsRequest: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerOperationsRequest", + brokerId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListBrokerOperationsRequest = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerOperationsRequest" as const, + + encode( + message: ListBrokerOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBrokerOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListBrokerOperationsRequest, + } as ListBrokerOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokerOperationsRequest { + const message = { + ...baseListBrokerOperationsRequest, + } as ListBrokerOperationsRequest; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? String(object.brokerId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListBrokerOperationsRequest): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokerOperationsRequest { + const message = { + ...baseListBrokerOperationsRequest, + } as ListBrokerOperationsRequest; + message.brokerId = object.brokerId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListBrokerOperationsRequest.$type, + ListBrokerOperationsRequest +); + +const baseListBrokerOperationsResponse: object = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerOperationsResponse", + nextPageToken: "", +}; + +export const ListBrokerOperationsResponse = { + $type: "yandex.cloud.iot.broker.v1.ListBrokerOperationsResponse" as const, + + encode( + message: ListBrokerOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListBrokerOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListBrokerOperationsResponse, + } as ListBrokerOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBrokerOperationsResponse { + const message = { + ...baseListBrokerOperationsResponse, + } as ListBrokerOperationsResponse; + message.operations = (object.operations ?? 
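+/*
+ * ListBrokerOperationsRequest uses the same page_size/page_token paging as the
+ * other list calls and adds a `filter` string; per the field comment above,
+ * filtering is currently limited to the [Broker.name] field. Illustrative
+ * request — the exact filter grammar is defined by the service, the value
+ * below is only an example:
+ *
+ *   ListBrokerOperationsRequest.fromPartial({
+ *     brokerId: 'my-broker-id', // hypothetical ID
+ *     pageSize: 50,
+ *     filter: 'name="my-broker"',
+ *   });
+ */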
[]).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBrokerOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBrokerOperationsResponse { + const message = { + ...baseListBrokerOperationsResponse, + } as ListBrokerOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListBrokerOperationsResponse.$type, + ListBrokerOperationsResponse +); + +/** A set of methods for managing broker. */ +export const BrokerServiceService = { + /** + * Returns the specified broker. + * + * To get the list of available brokers, make a [List] request. + */ + get: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBrokerRequest) => + Buffer.from(GetBrokerRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBrokerRequest.decode(value), + responseSerialize: (value: Broker) => + Buffer.from(Broker.encode(value).finish()), + responseDeserialize: (value: Buffer) => Broker.decode(value), + }, + /** Retrieves the list of brokers in the specified folder. */ + list: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBrokersRequest) => + Buffer.from(ListBrokersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBrokersRequest.decode(value), + responseSerialize: (value: ListBrokersResponse) => + Buffer.from(ListBrokersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBrokersResponse.decode(value), + }, + /** Creates a broker in the specified folder. */ + create: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateBrokerRequest) => + Buffer.from(CreateBrokerRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateBrokerRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified broker. */ + update: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateBrokerRequest) => + Buffer.from(UpdateBrokerRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateBrokerRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified broker. 
*/ + delete: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBrokerRequest) => + Buffer.from(DeleteBrokerRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteBrokerRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Retrieves the list of broker certificates for the specified broker. */ + listCertificates: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/ListCertificates", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBrokerCertificatesRequest) => + Buffer.from(ListBrokerCertificatesRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListBrokerCertificatesRequest.decode(value), + responseSerialize: (value: ListBrokerCertificatesResponse) => + Buffer.from(ListBrokerCertificatesResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListBrokerCertificatesResponse.decode(value), + }, + /** Adds a certificate. */ + addCertificate: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/AddCertificate", + requestStream: false, + responseStream: false, + requestSerialize: (value: AddBrokerCertificateRequest) => + Buffer.from(AddBrokerCertificateRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AddBrokerCertificateRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified broker certificate. */ + deleteCertificate: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/DeleteCertificate", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBrokerCertificateRequest) => + Buffer.from(DeleteBrokerCertificateRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteBrokerCertificateRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Retrieves the list of passwords for the specified broker. */ + listPasswords: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/ListPasswords", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBrokerPasswordsRequest) => + Buffer.from(ListBrokerPasswordsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListBrokerPasswordsRequest.decode(value), + responseSerialize: (value: ListBrokerPasswordsResponse) => + Buffer.from(ListBrokerPasswordsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListBrokerPasswordsResponse.decode(value), + }, + /** Adds password for the specified broker. */ + addPassword: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/AddPassword", + requestStream: false, + responseStream: false, + requestSerialize: (value: AddBrokerPasswordRequest) => + Buffer.from(AddBrokerPasswordRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + AddBrokerPasswordRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified password. 
*/ + deletePassword: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/DeletePassword", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteBrokerPasswordRequest) => + Buffer.from(DeleteBrokerPasswordRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeleteBrokerPasswordRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified broker. */ + listOperations: { + path: "/yandex.cloud.iot.broker.v1.BrokerService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBrokerOperationsRequest) => + Buffer.from(ListBrokerOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListBrokerOperationsRequest.decode(value), + responseSerialize: (value: ListBrokerOperationsResponse) => + Buffer.from(ListBrokerOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListBrokerOperationsResponse.decode(value), + }, +} as const; + +export interface BrokerServiceServer extends UntypedServiceImplementation { + /** + * Returns the specified broker. + * + * To get the list of available brokers, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of brokers in the specified folder. */ + list: handleUnaryCall; + /** Creates a broker in the specified folder. */ + create: handleUnaryCall; + /** Updates the specified broker. */ + update: handleUnaryCall; + /** Deletes the specified broker. */ + delete: handleUnaryCall; + /** Retrieves the list of broker certificates for the specified broker. */ + listCertificates: handleUnaryCall< + ListBrokerCertificatesRequest, + ListBrokerCertificatesResponse + >; + /** Adds a certificate. */ + addCertificate: handleUnaryCall; + /** Deletes the specified broker certificate. */ + deleteCertificate: handleUnaryCall; + /** Retrieves the list of passwords for the specified broker. */ + listPasswords: handleUnaryCall< + ListBrokerPasswordsRequest, + ListBrokerPasswordsResponse + >; + /** Adds password for the specified broker. */ + addPassword: handleUnaryCall; + /** Deletes the specified password. */ + deletePassword: handleUnaryCall; + /** Lists operations for the specified broker. */ + listOperations: handleUnaryCall< + ListBrokerOperationsRequest, + ListBrokerOperationsResponse + >; +} + +export interface BrokerServiceClient extends Client { + /** + * Returns the specified broker. + * + * To get the list of available brokers, make a [List] request. + */ + get( + request: GetBrokerRequest, + callback: (error: ServiceError | null, response: Broker) => void + ): ClientUnaryCall; + get( + request: GetBrokerRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Broker) => void + ): ClientUnaryCall; + get( + request: GetBrokerRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Broker) => void + ): ClientUnaryCall; + /** Retrieves the list of brokers in the specified folder. 
*/ + list( + request: ListBrokersRequest, + callback: ( + error: ServiceError | null, + response: ListBrokersResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBrokersRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBrokersResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBrokersRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBrokersResponse + ) => void + ): ClientUnaryCall; + /** Creates a broker in the specified folder. */ + create( + request: CreateBrokerRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateBrokerRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateBrokerRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified broker. */ + update( + request: UpdateBrokerRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateBrokerRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateBrokerRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified broker. */ + delete( + request: DeleteBrokerRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBrokerRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteBrokerRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Retrieves the list of broker certificates for the specified broker. */ + listCertificates( + request: ListBrokerCertificatesRequest, + callback: ( + error: ServiceError | null, + response: ListBrokerCertificatesResponse + ) => void + ): ClientUnaryCall; + listCertificates( + request: ListBrokerCertificatesRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBrokerCertificatesResponse + ) => void + ): ClientUnaryCall; + listCertificates( + request: ListBrokerCertificatesRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBrokerCertificatesResponse + ) => void + ): ClientUnaryCall; + /** Adds a certificate. */ + addCertificate( + request: AddBrokerCertificateRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addCertificate( + request: AddBrokerCertificateRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addCertificate( + request: AddBrokerCertificateRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified broker certificate. 
*/ + deleteCertificate( + request: DeleteBrokerCertificateRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteCertificate( + request: DeleteBrokerCertificateRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deleteCertificate( + request: DeleteBrokerCertificateRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Retrieves the list of passwords for the specified broker. */ + listPasswords( + request: ListBrokerPasswordsRequest, + callback: ( + error: ServiceError | null, + response: ListBrokerPasswordsResponse + ) => void + ): ClientUnaryCall; + listPasswords( + request: ListBrokerPasswordsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBrokerPasswordsResponse + ) => void + ): ClientUnaryCall; + listPasswords( + request: ListBrokerPasswordsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBrokerPasswordsResponse + ) => void + ): ClientUnaryCall; + /** Adds password for the specified broker. */ + addPassword( + request: AddBrokerPasswordRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addPassword( + request: AddBrokerPasswordRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + addPassword( + request: AddBrokerPasswordRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified password. */ + deletePassword( + request: DeleteBrokerPasswordRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deletePassword( + request: DeleteBrokerPasswordRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deletePassword( + request: DeleteBrokerPasswordRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified broker. 
*/ + listOperations( + request: ListBrokerOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListBrokerOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListBrokerOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBrokerOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListBrokerOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBrokerOperationsResponse + ) => void + ): ClientUnaryCall; +} + +export const BrokerServiceClient = makeGenericClientConstructor( + BrokerServiceService, + "yandex.cloud.iot.broker.v1.BrokerService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BrokerServiceClient; + service: typeof BrokerServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/iot/index.ts b/src/generated/yandex/cloud/iot/index.ts index 0c83ae52..ae539991 100644 --- a/src/generated/yandex/cloud/iot/index.ts +++ b/src/generated/yandex/cloud/iot/index.ts @@ -1,3 +1,6 @@ +export * as broker from './broker/v1/broker' +export * as broker_broker_data_service from './broker/v1/broker_data_service' +export * as broker_service from './broker/v1/broker_service' export * as devices_device from './devices/v1/device' export * as devices_device_data_service from './devices/v1/device_data_service' export * as devices_device_service from './devices/v1/device_service' diff --git a/src/generated/yandex/cloud/k8s/v1/cluster.ts b/src/generated/yandex/cloud/k8s/v1/cluster.ts index 3a273dbf..50584794 100644 --- a/src/generated/yandex/cloud/k8s/v1/cluster.ts +++ b/src/generated/yandex/cloud/k8s/v1/cluster.ts @@ -272,6 +272,8 @@ export interface RegionalMaster { internalV4Address: string; /** IPv4 external network address that is assigned to the master. */ externalV4Address: string; + /** IPv6 external network address that is assigned to the master. */ + externalV6Address: string; } export interface MasterEndpoints { @@ -280,6 +282,8 @@ export interface MasterEndpoints { internalV4Endpoint: string; /** External endpoint that can be used to access Kubernetes cluster API from the internet (outside of the cloud). 
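[Editorial sketch, not part of the generated patch: roughly how the regenerated BrokerService client above could be used to page through broker operations. Only BrokerServiceClient, ListBrokerOperationsRequest/Response and their fields shown above come from the generated code; the import path, endpoint address and broker ID are placeholders.]

import { ChannelCredentials } from "@grpc/grpc-js";
import {
  BrokerServiceClient,
  ListBrokerOperationsRequest,
  ListBrokerOperationsResponse,
} from "./broker/v1/broker_service"; // placeholder path; resolve against the generated sources

// Hypothetical endpoint address.
const client = new BrokerServiceClient(
  "iot-broker.example.invalid:443",
  ChannelCredentials.createSsl()
);

// Walk all operations of one broker, 100 per page, following nextPageToken.
async function listAllBrokerOperations(brokerId: string): Promise<void> {
  let pageToken = "";
  do {
    const request = ListBrokerOperationsRequest.fromPartial({
      brokerId,
      pageSize: 100,
      pageToken,
    });
    const response = await new Promise<ListBrokerOperationsResponse>(
      (resolve, reject) =>
        client.listOperations(request, (err, res) =>
          err ? reject(err) : resolve(res)
        )
    );
    response.operations.forEach((op) => console.log(op.id));
    pageToken = response.nextPageToken;
  } while (pageToken !== "");
}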
*/ externalV4Endpoint: string; + /** External IPv6 endpoint that can be used to access Kubernetes cluster API from the internet (outside of the cloud). */ + externalV6Endpoint: string; } export interface IPAllocationPolicy { @@ -1216,6 +1220,7 @@ const baseRegionalMaster: object = { regionId: "", internalV4Address: "", externalV4Address: "", + externalV6Address: "", }; export const RegionalMaster = { @@ -1234,6 +1239,9 @@ export const RegionalMaster = { if (message.externalV4Address !== "") { writer.uint32(26).string(message.externalV4Address); } + if (message.externalV6Address !== "") { + writer.uint32(34).string(message.externalV6Address); + } return writer; }, @@ -1253,6 +1261,9 @@ export const RegionalMaster = { case 3: message.externalV4Address = reader.string(); break; + case 4: + message.externalV6Address = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1277,6 +1288,11 @@ export const RegionalMaster = { object.externalV4Address !== null ? String(object.externalV4Address) : ""; + message.externalV6Address = + object.externalV6Address !== undefined && + object.externalV6Address !== null + ? String(object.externalV6Address) + : ""; return message; }, @@ -1287,6 +1303,8 @@ export const RegionalMaster = { (obj.internalV4Address = message.internalV4Address); message.externalV4Address !== undefined && (obj.externalV4Address = message.externalV4Address); + message.externalV6Address !== undefined && + (obj.externalV6Address = message.externalV6Address); return obj; }, @@ -1297,6 +1315,7 @@ export const RegionalMaster = { message.regionId = object.regionId ?? ""; message.internalV4Address = object.internalV4Address ?? ""; message.externalV4Address = object.externalV4Address ?? ""; + message.externalV6Address = object.externalV6Address ?? ""; return message; }, }; @@ -1307,6 +1326,7 @@ const baseMasterEndpoints: object = { $type: "yandex.cloud.k8s.v1.MasterEndpoints", internalV4Endpoint: "", externalV4Endpoint: "", + externalV6Endpoint: "", }; export const MasterEndpoints = { @@ -1322,6 +1342,9 @@ export const MasterEndpoints = { if (message.externalV4Endpoint !== "") { writer.uint32(18).string(message.externalV4Endpoint); } + if (message.externalV6Endpoint !== "") { + writer.uint32(26).string(message.externalV6Endpoint); + } return writer; }, @@ -1338,6 +1361,9 @@ export const MasterEndpoints = { case 2: message.externalV4Endpoint = reader.string(); break; + case 3: + message.externalV6Endpoint = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1358,6 +1384,11 @@ export const MasterEndpoints = { object.externalV4Endpoint !== null ? String(object.externalV4Endpoint) : ""; + message.externalV6Endpoint = + object.externalV6Endpoint !== undefined && + object.externalV6Endpoint !== null + ? String(object.externalV6Endpoint) + : ""; return message; }, @@ -1367,6 +1398,8 @@ export const MasterEndpoints = { (obj.internalV4Endpoint = message.internalV4Endpoint); message.externalV4Endpoint !== undefined && (obj.externalV4Endpoint = message.externalV4Endpoint); + message.externalV6Endpoint !== undefined && + (obj.externalV6Endpoint = message.externalV6Endpoint); return obj; }, @@ -1376,6 +1409,7 @@ export const MasterEndpoints = { const message = { ...baseMasterEndpoints } as MasterEndpoints; message.internalV4Endpoint = object.internalV4Endpoint ?? ""; message.externalV4Endpoint = object.externalV4Endpoint ?? ""; + message.externalV6Endpoint = object.externalV6Endpoint ?? 
""; return message; }, }; diff --git a/src/generated/yandex/cloud/k8s/v1/cluster_service.ts b/src/generated/yandex/cloud/k8s/v1/cluster_service.ts index be1df357..1b7239f2 100644 --- a/src/generated/yandex/cloud/k8s/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/k8s/v1/cluster_service.ts @@ -209,7 +209,7 @@ export interface CreateClusterRequest { labels: { [key: string]: string }; /** ID of the network. */ networkId: string; - /** IP allocation policy of the Kubernetes cluster. */ + /** Master specification of the Kubernetes cluster. */ masterSpec?: MasterSpec; /** IP allocation policy of the Kubernetes cluster. */ ipAllocationPolicy?: IPAllocationPolicy; @@ -396,6 +396,8 @@ export interface RegionalMasterSpec { locations: MasterLocation[]; /** Specify to allocate a static public IP for the master. */ externalV4AddressSpec?: ExternalAddressSpec; + /** Specification of parameters for external IPv6 networking. */ + externalV6AddressSpec?: ExternalAddressSpec; } export interface InternalAddressSpec { @@ -406,6 +408,8 @@ export interface InternalAddressSpec { export interface ExternalAddressSpec { $type: "yandex.cloud.k8s.v1.ExternalAddressSpec"; + /** IP address. */ + address: string; } export interface MasterLocation { @@ -2997,6 +3001,12 @@ export const RegionalMasterSpec = { writer.uint32(26).fork() ).ldelim(); } + if (message.externalV6AddressSpec !== undefined) { + ExternalAddressSpec.encode( + message.externalV6AddressSpec, + writer.uint32(34).fork() + ).ldelim(); + } return writer; }, @@ -3022,6 +3032,12 @@ export const RegionalMasterSpec = { reader.uint32() ); break; + case 4: + message.externalV6AddressSpec = ExternalAddressSpec.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -3044,6 +3060,11 @@ export const RegionalMasterSpec = { object.externalV4AddressSpec !== null ? ExternalAddressSpec.fromJSON(object.externalV4AddressSpec) : undefined; + message.externalV6AddressSpec = + object.externalV6AddressSpec !== undefined && + object.externalV6AddressSpec !== null + ? ExternalAddressSpec.fromJSON(object.externalV6AddressSpec) + : undefined; return message; }, @@ -3061,6 +3082,10 @@ export const RegionalMasterSpec = { (obj.externalV4AddressSpec = message.externalV4AddressSpec ? ExternalAddressSpec.toJSON(message.externalV4AddressSpec) : undefined); + message.externalV6AddressSpec !== undefined && + (obj.externalV6AddressSpec = message.externalV6AddressSpec + ? ExternalAddressSpec.toJSON(message.externalV6AddressSpec) + : undefined); return obj; }, @@ -3076,6 +3101,11 @@ export const RegionalMasterSpec = { object.externalV4AddressSpec !== null ? ExternalAddressSpec.fromPartial(object.externalV4AddressSpec) : undefined; + message.externalV6AddressSpec = + object.externalV6AddressSpec !== undefined && + object.externalV6AddressSpec !== null + ? 
ExternalAddressSpec.fromPartial(object.externalV6AddressSpec) + : undefined; return message; }, }; @@ -3146,15 +3176,19 @@ messageTypeRegistry.set(InternalAddressSpec.$type, InternalAddressSpec); const baseExternalAddressSpec: object = { $type: "yandex.cloud.k8s.v1.ExternalAddressSpec", + address: "", }; export const ExternalAddressSpec = { $type: "yandex.cloud.k8s.v1.ExternalAddressSpec" as const, encode( - _: ExternalAddressSpec, + message: ExternalAddressSpec, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { + if (message.address !== "") { + writer.uint32(10).string(message.address); + } return writer; }, @@ -3165,6 +3199,9 @@ export const ExternalAddressSpec = { while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { + case 1: + message.address = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -3173,20 +3210,26 @@ export const ExternalAddressSpec = { return message; }, - fromJSON(_: any): ExternalAddressSpec { + fromJSON(object: any): ExternalAddressSpec { const message = { ...baseExternalAddressSpec } as ExternalAddressSpec; + message.address = + object.address !== undefined && object.address !== null + ? String(object.address) + : ""; return message; }, - toJSON(_: ExternalAddressSpec): unknown { + toJSON(message: ExternalAddressSpec): unknown { const obj: any = {}; + message.address !== undefined && (obj.address = message.address); return obj; }, fromPartial, I>>( - _: I + object: I ): ExternalAddressSpec { const message = { ...baseExternalAddressSpec } as ExternalAddressSpec; + message.address = object.address ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/k8s/v1/node.ts b/src/generated/yandex/cloud/k8s/v1/node.ts index 4d41614d..02a3220d 100644 --- a/src/generated/yandex/cloud/k8s/v1/node.ts +++ b/src/generated/yandex/cloud/k8s/v1/node.ts @@ -261,6 +261,19 @@ export interface AttachedVolume { export interface NodeTemplate { $type: "yandex.cloud.k8s.v1.NodeTemplate"; + /** + * Name of the instance. + * In order to be unique it must contain at least on of instance unique placeholders: + * {instance.short_id} + * {instance.index} + * combination of {instance.zone_id} and {instance.index_in_zone} + * Example: my-instance-{instance.index} + * If not set, default is used: {instance_group.id}-{instance.short_id} + * It may also contain another placeholders, see metadata doc for full list. + */ + name: string; + /** these labels will be assigned to compute nodes (instances), created by the nodegroup */ + labels: { [key: string]: string }; /** ID of the hardware platform configuration for the node. */ platformId: string; /** Computing resources of the node such as the amount of memory and number of cores. */ @@ -294,6 +307,12 @@ export interface NodeTemplate { containerRuntimeSettings?: NodeTemplate_ContainerRuntimeSettings; } +export interface NodeTemplate_LabelsEntry { + $type: "yandex.cloud.k8s.v1.NodeTemplate.LabelsEntry"; + key: string; + value: string; +} + export interface NodeTemplate_MetadataEntry { $type: "yandex.cloud.k8s.v1.NodeTemplate.MetadataEntry"; key: string; @@ -414,6 +433,20 @@ export interface NodeAddressSpec { $type: "yandex.cloud.k8s.v1.NodeAddressSpec"; /** One-to-one NAT configuration. Setting up one-to-one NAT ensures that public IP addresses are assigned to nodes, and therefore internet is accessible for all nodes of the node group. If the field is not set, NAT will not be set up. */ oneToOneNatSpec?: OneToOneNatSpec; + /** Internal DNS configuration. 
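[Editorial sketch, not part of the generated patch: how the new IPv6 master address fields added above might be filled in. The import path is a placeholder and the concrete address is illustrative only.]

import { RegionalMasterSpec } from "./cluster_service"; // placeholder path to the generated k8s module

// Regional master with a public IPv4 address allocated automatically and the new
// external_v6_address_spec (field 4) carrying an explicit IPv6 address.
const masterSpec = RegionalMasterSpec.fromPartial({
  externalV4AddressSpec: {},                         // empty spec: just allocate an address
  externalV6AddressSpec: { address: "2001:db8::1" }, // documentation-range IPv6, illustrative
});
console.log(RegionalMasterSpec.toJSON(masterSpec));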
*/ + dnsRecordSpecs: DnsRecordSpec[]; +} + +export interface DnsRecordSpec { + $type: "yandex.cloud.k8s.v1.DnsRecordSpec"; + /** FQDN (required). */ + fqdn: string; + /** DNS zone id (optional, if not set, private zone is used). */ + dnsZoneId: string; + /** DNS record ttl, values in 0-86400 (optional). */ + ttl: number; + /** When set to true, also create PTR DNS record (optional). */ + ptr: boolean; } export interface OneToOneNatSpec { @@ -1154,6 +1187,7 @@ messageTypeRegistry.set(AttachedVolume.$type, AttachedVolume); const baseNodeTemplate: object = { $type: "yandex.cloud.k8s.v1.NodeTemplate", + name: "", platformId: "", }; @@ -1164,6 +1198,19 @@ export const NodeTemplate = { message: NodeTemplate, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { + if (message.name !== "") { + writer.uint32(106).string(message.name); + } + Object.entries(message.labels).forEach(([key, value]) => { + NodeTemplate_LabelsEntry.encode( + { + $type: "yandex.cloud.k8s.v1.NodeTemplate.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(122).fork() + ).ldelim(); + }); if (message.platformId !== "") { writer.uint32(10).string(message.platformId); } @@ -1226,11 +1273,24 @@ export const NodeTemplate = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseNodeTemplate } as NodeTemplate; + message.labels = {}; message.metadata = {}; message.networkInterfaceSpecs = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { + case 13: + message.name = reader.string(); + break; + case 15: + const entry15 = NodeTemplate_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry15.value !== undefined) { + message.labels[entry15.key] = entry15.value; + } + break; case 1: message.platformId = reader.string(); break; @@ -1295,6 +1355,16 @@ export const NodeTemplate = { fromJSON(object: any): NodeTemplate { const message = { ...baseNodeTemplate } as NodeTemplate; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); message.platformId = object.platformId !== undefined && object.platformId !== null ? String(object.platformId) @@ -1344,6 +1414,13 @@ export const NodeTemplate = { toJSON(message: NodeTemplate): unknown { const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } message.platformId !== undefined && (obj.platformId = message.platformId); message.resourcesSpec !== undefined && (obj.resourcesSpec = message.resourcesSpec @@ -1395,6 +1472,15 @@ export const NodeTemplate = { object: I ): NodeTemplate { const message = { ...baseNodeTemplate } as NodeTemplate; + message.name = object.name ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); message.platformId = object.platformId ?? 
""; message.resourcesSpec = object.resourcesSpec !== undefined && object.resourcesSpec !== null @@ -1445,6 +1531,91 @@ export const NodeTemplate = { messageTypeRegistry.set(NodeTemplate.$type, NodeTemplate); +const baseNodeTemplate_LabelsEntry: object = { + $type: "yandex.cloud.k8s.v1.NodeTemplate.LabelsEntry", + key: "", + value: "", +}; + +export const NodeTemplate_LabelsEntry = { + $type: "yandex.cloud.k8s.v1.NodeTemplate.LabelsEntry" as const, + + encode( + message: NodeTemplate_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): NodeTemplate_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseNodeTemplate_LabelsEntry, + } as NodeTemplate_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): NodeTemplate_LabelsEntry { + const message = { + ...baseNodeTemplate_LabelsEntry, + } as NodeTemplate_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: NodeTemplate_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): NodeTemplate_LabelsEntry { + const message = { + ...baseNodeTemplate_LabelsEntry, + } as NodeTemplate_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + NodeTemplate_LabelsEntry.$type, + NodeTemplate_LabelsEntry +); + const baseNodeTemplate_MetadataEntry: object = { $type: "yandex.cloud.k8s.v1.NodeTemplate.MetadataEntry", key: "", @@ -1835,6 +2006,9 @@ export const NodeAddressSpec = { writer.uint32(10).fork() ).ldelim(); } + for (const v of message.dnsRecordSpecs) { + DnsRecordSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } return writer; }, @@ -1842,6 +2016,7 @@ export const NodeAddressSpec = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseNodeAddressSpec } as NodeAddressSpec; + message.dnsRecordSpecs = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1851,6 +2026,11 @@ export const NodeAddressSpec = { reader.uint32() ); break; + case 2: + message.dnsRecordSpecs.push( + DnsRecordSpec.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -1865,6 +2045,9 @@ export const NodeAddressSpec = { object.oneToOneNatSpec !== undefined && object.oneToOneNatSpec !== null ? OneToOneNatSpec.fromJSON(object.oneToOneNatSpec) : undefined; + message.dnsRecordSpecs = (object.dnsRecordSpecs ?? 
[]).map((e: any) => + DnsRecordSpec.fromJSON(e) + ); return message; }, @@ -1874,6 +2057,13 @@ export const NodeAddressSpec = { (obj.oneToOneNatSpec = message.oneToOneNatSpec ? OneToOneNatSpec.toJSON(message.oneToOneNatSpec) : undefined); + if (message.dnsRecordSpecs) { + obj.dnsRecordSpecs = message.dnsRecordSpecs.map((e) => + e ? DnsRecordSpec.toJSON(e) : undefined + ); + } else { + obj.dnsRecordSpecs = []; + } return obj; }, @@ -1885,12 +2075,113 @@ export const NodeAddressSpec = { object.oneToOneNatSpec !== undefined && object.oneToOneNatSpec !== null ? OneToOneNatSpec.fromPartial(object.oneToOneNatSpec) : undefined; + message.dnsRecordSpecs = + object.dnsRecordSpecs?.map((e) => DnsRecordSpec.fromPartial(e)) || []; return message; }, }; messageTypeRegistry.set(NodeAddressSpec.$type, NodeAddressSpec); +const baseDnsRecordSpec: object = { + $type: "yandex.cloud.k8s.v1.DnsRecordSpec", + fqdn: "", + dnsZoneId: "", + ttl: 0, + ptr: false, +}; + +export const DnsRecordSpec = { + $type: "yandex.cloud.k8s.v1.DnsRecordSpec" as const, + + encode( + message: DnsRecordSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.fqdn !== "") { + writer.uint32(10).string(message.fqdn); + } + if (message.dnsZoneId !== "") { + writer.uint32(18).string(message.dnsZoneId); + } + if (message.ttl !== 0) { + writer.uint32(24).int64(message.ttl); + } + if (message.ptr === true) { + writer.uint32(32).bool(message.ptr); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DnsRecordSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDnsRecordSpec } as DnsRecordSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.fqdn = reader.string(); + break; + case 2: + message.dnsZoneId = reader.string(); + break; + case 3: + message.ttl = longToNumber(reader.int64() as Long); + break; + case 4: + message.ptr = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DnsRecordSpec { + const message = { ...baseDnsRecordSpec } as DnsRecordSpec; + message.fqdn = + object.fqdn !== undefined && object.fqdn !== null + ? String(object.fqdn) + : ""; + message.dnsZoneId = + object.dnsZoneId !== undefined && object.dnsZoneId !== null + ? String(object.dnsZoneId) + : ""; + message.ttl = + object.ttl !== undefined && object.ttl !== null ? Number(object.ttl) : 0; + message.ptr = + object.ptr !== undefined && object.ptr !== null + ? Boolean(object.ptr) + : false; + return message; + }, + + toJSON(message: DnsRecordSpec): unknown { + const obj: any = {}; + message.fqdn !== undefined && (obj.fqdn = message.fqdn); + message.dnsZoneId !== undefined && (obj.dnsZoneId = message.dnsZoneId); + message.ttl !== undefined && (obj.ttl = Math.round(message.ttl)); + message.ptr !== undefined && (obj.ptr = message.ptr); + return obj; + }, + + fromPartial, I>>( + object: I + ): DnsRecordSpec { + const message = { ...baseDnsRecordSpec } as DnsRecordSpec; + message.fqdn = object.fqdn ?? ""; + message.dnsZoneId = object.dnsZoneId ?? ""; + message.ttl = object.ttl ?? 0; + message.ptr = object.ptr ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(DnsRecordSpec.$type, DnsRecordSpec); + const baseOneToOneNatSpec: object = { $type: "yandex.cloud.k8s.v1.OneToOneNatSpec", ipVersion: 0, diff --git a/src/generated/yandex/cloud/k8s/v1/version.ts b/src/generated/yandex/cloud/k8s/v1/version.ts index 100bef36..0b8f548b 100644 --- a/src/generated/yandex/cloud/k8s/v1/version.ts +++ b/src/generated/yandex/cloud/k8s/v1/version.ts @@ -11,7 +11,7 @@ export interface VersionInfo { currentVersion: string; /** * Newer revisions may include Kubernetes patches (e.g 1.15.1 -> 1.15.2) as well - * as some internal component updates - new features or bug fixes in Yandex specific + * as some internal component updates - new features or bug fixes in platform specific * components either on the master or nodes. */ newRevisionAvailable: boolean; diff --git a/src/generated/yandex/cloud/logging/v1/log_entry.ts b/src/generated/yandex/cloud/logging/v1/log_entry.ts index b7b8996f..d340c32f 100644 --- a/src/generated/yandex/cloud/logging/v1/log_entry.ts +++ b/src/generated/yandex/cloud/logging/v1/log_entry.ts @@ -43,6 +43,8 @@ export interface LogEntry { message: string; /** Entry annotation. */ jsonPayload?: { [key: string]: any }; + /** Entry stream name. */ + streamName: string; } export interface IncomingLogEntry { @@ -59,6 +61,8 @@ export interface IncomingLogEntry { message: string; /** Entry annotation. */ jsonPayload?: { [key: string]: any }; + /** Entry stream name. */ + streamName: string; } export interface LogEntryDefaults { @@ -76,6 +80,8 @@ export interface LogEntryDefaults { * Any conflict will be resolved in favor of entry own annotation. */ jsonPayload?: { [key: string]: any }; + /** Entry stream name. */ + streamName: string; } export interface Destination { @@ -199,6 +205,7 @@ const baseLogEntry: object = { uid: "", level: 0, message: "", + streamName: "", }; export const LogEntry = { @@ -247,6 +254,9 @@ export const LogEntry = { writer.uint32(66).fork() ).ldelim(); } + if (message.streamName !== "") { + writer.uint32(74).string(message.streamName); + } return writer; }, @@ -289,6 +299,9 @@ export const LogEntry = { Struct.decode(reader, reader.uint32()) ); break; + case 9: + message.streamName = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -327,6 +340,10 @@ export const LogEntry = { : ""; message.jsonPayload = typeof object.jsonPayload === "object" ? object.jsonPayload : undefined; + message.streamName = + object.streamName !== undefined && object.streamName !== null + ? String(object.streamName) + : ""; return message; }, @@ -348,6 +365,7 @@ export const LogEntry = { message.message !== undefined && (obj.message = message.message); message.jsonPayload !== undefined && (obj.jsonPayload = message.jsonPayload); + message.streamName !== undefined && (obj.streamName = message.streamName); return obj; }, @@ -364,6 +382,7 @@ export const LogEntry = { message.level = object.level ?? 0; message.message = object.message ?? ""; message.jsonPayload = object.jsonPayload ?? undefined; + message.streamName = object.streamName ?? 
""; return message; }, }; @@ -374,6 +393,7 @@ const baseIncomingLogEntry: object = { $type: "yandex.cloud.logging.v1.IncomingLogEntry", level: 0, message: "", + streamName: "", }; export const IncomingLogEntry = { @@ -401,6 +421,9 @@ export const IncomingLogEntry = { writer.uint32(34).fork() ).ldelim(); } + if (message.streamName !== "") { + writer.uint32(42).string(message.streamName); + } return writer; }, @@ -427,6 +450,9 @@ export const IncomingLogEntry = { Struct.decode(reader, reader.uint32()) ); break; + case 5: + message.streamName = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -451,6 +477,10 @@ export const IncomingLogEntry = { : ""; message.jsonPayload = typeof object.jsonPayload === "object" ? object.jsonPayload : undefined; + message.streamName = + object.streamName !== undefined && object.streamName !== null + ? String(object.streamName) + : ""; return message; }, @@ -463,6 +493,7 @@ export const IncomingLogEntry = { message.message !== undefined && (obj.message = message.message); message.jsonPayload !== undefined && (obj.jsonPayload = message.jsonPayload); + message.streamName !== undefined && (obj.streamName = message.streamName); return obj; }, @@ -474,6 +505,7 @@ export const IncomingLogEntry = { message.level = object.level ?? 0; message.message = object.message ?? ""; message.jsonPayload = object.jsonPayload ?? undefined; + message.streamName = object.streamName ?? ""; return message; }, }; @@ -483,6 +515,7 @@ messageTypeRegistry.set(IncomingLogEntry.$type, IncomingLogEntry); const baseLogEntryDefaults: object = { $type: "yandex.cloud.logging.v1.LogEntryDefaults", level: 0, + streamName: "", }; export const LogEntryDefaults = { @@ -501,6 +534,9 @@ export const LogEntryDefaults = { writer.uint32(34).fork() ).ldelim(); } + if (message.streamName !== "") { + writer.uint32(42).string(message.streamName); + } return writer; }, @@ -519,6 +555,9 @@ export const LogEntryDefaults = { Struct.decode(reader, reader.uint32()) ); break; + case 5: + message.streamName = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -535,6 +574,10 @@ export const LogEntryDefaults = { : 0; message.jsonPayload = typeof object.jsonPayload === "object" ? object.jsonPayload : undefined; + message.streamName = + object.streamName !== undefined && object.streamName !== null + ? String(object.streamName) + : ""; return message; }, @@ -544,6 +587,7 @@ export const LogEntryDefaults = { (obj.level = logLevel_LevelToJSON(message.level)); message.jsonPayload !== undefined && (obj.jsonPayload = message.jsonPayload); + message.streamName !== undefined && (obj.streamName = message.streamName); return obj; }, @@ -553,6 +597,7 @@ export const LogEntryDefaults = { const message = { ...baseLogEntryDefaults } as LogEntryDefaults; message.level = object.level ?? 0; message.jsonPayload = object.jsonPayload ?? undefined; + message.streamName = object.streamName ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/logging/v1/log_group.ts b/src/generated/yandex/cloud/logging/v1/log_group.ts index 4210d5ba..8f050505 100644 --- a/src/generated/yandex/cloud/logging/v1/log_group.ts +++ b/src/generated/yandex/cloud/logging/v1/log_group.ts @@ -31,6 +31,7 @@ export interface LogGroup { * Entries will be present in group during this period. 
*/ retentionPeriod?: Duration; + /** Data stream name */ dataStream: string; } diff --git a/src/generated/yandex/cloud/logging/v1/log_reading_service.ts b/src/generated/yandex/cloud/logging/v1/log_reading_service.ts index 5d8450a9..aa330d88 100644 --- a/src/generated/yandex/cloud/logging/v1/log_reading_service.ts +++ b/src/generated/yandex/cloud/logging/v1/log_reading_service.ts @@ -98,6 +98,12 @@ export interface Criteria { levels: LogLevel_Level[]; /** Filter expression. For details about filtering, see [documentation](/docs/logging/concepts/filter). */ filter: string; + /** + * List of stream names to limit log entries to. + * + * Empty list disables filter. + */ + streamNames: string[]; /** The maximum number of results per page to return. */ pageSize: number; /** @@ -303,6 +309,7 @@ const baseCriteria: object = { resourceIds: "", levels: 0, filter: "", + streamNames: "", pageSize: 0, maxResponseSize: 0, }; @@ -343,6 +350,9 @@ export const Criteria = { if (message.filter !== "") { writer.uint32(58).string(message.filter); } + for (const v of message.streamNames) { + writer.uint32(82).string(v!); + } if (message.pageSize !== 0) { writer.uint32(64).int64(message.pageSize); } @@ -359,6 +369,7 @@ export const Criteria = { message.resourceTypes = []; message.resourceIds = []; message.levels = []; + message.streamNames = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -394,6 +405,9 @@ export const Criteria = { case 7: message.filter = reader.string(); break; + case 10: + message.streamNames.push(reader.string()); + break; case 8: message.pageSize = longToNumber(reader.int64() as Long); break; @@ -433,6 +447,7 @@ export const Criteria = { object.filter !== undefined && object.filter !== null ? String(object.filter) : ""; + message.streamNames = (object.streamNames ?? []).map((e: any) => String(e)); message.pageSize = object.pageSize !== undefined && object.pageSize !== null ? Number(object.pageSize) @@ -465,6 +480,11 @@ export const Criteria = { obj.levels = []; } message.filter !== undefined && (obj.filter = message.filter); + if (message.streamNames) { + obj.streamNames = message.streamNames.map((e) => e); + } else { + obj.streamNames = []; + } message.pageSize !== undefined && (obj.pageSize = Math.round(message.pageSize)); message.maxResponseSize !== undefined && @@ -481,6 +501,7 @@ export const Criteria = { message.until = object.until ?? undefined; message.levels = object.levels?.map((e) => e) || []; message.filter = object.filter ?? ""; + message.streamNames = object.streamNames?.map((e) => e) || []; message.pageSize = object.pageSize ?? 0; message.maxResponseSize = object.maxResponseSize ?? 0; return message; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/backup_service.ts index 71dc880d..9e2f6129 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/backup_service.ts @@ -42,7 +42,7 @@ export interface ListBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. 
*/ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts index cbcd1c38..594fdcfb 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts @@ -9,7 +9,11 @@ import { import { TimeOfDay } from "../../../../../google/type/timeofday"; import { ClickhouseConfigSet } from "../../../../../yandex/cloud/mdb/clickhouse/v1/config/clickhouse"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; -import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; +import { + BoolValue, + Int64Value, + DoubleValue, +} from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.clickhouse.v1"; @@ -49,7 +53,7 @@ export interface Cluster { health: Cluster_Health; /** Current state of the cluster. */ status: Cluster_Status; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** Maintenance window for the cluster. */ maintenanceWindow?: MaintenanceWindow; @@ -319,7 +323,7 @@ export interface ShardConfig_Clickhouse { resources?: Resources; /** * Relative weight of a shard considered when writing data to the cluster. - * For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/table_engines/distributed/). + * For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/operations/table_engines/distributed/). */ weight?: number; } @@ -330,7 +334,7 @@ export interface Host { * Name of the ClickHouse host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. */ name: string; /** ID of the ClickHouse host. The ID is assigned by MDB at creation time. */ @@ -550,10 +554,10 @@ export interface Resources { export interface Access { $type: "yandex.cloud.mdb.clickhouse.v1.Access"; - /** Allow to export data from the cluster to Yandex DataLens. */ + /** Allow to export data from the cluster to DataLens. */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex Cloud management console. + * Allow SQL queries to the cluster databases from the management console. * * See [SQL queries in the management console](/docs/managed-clickhouse/operations/web-sql-query) for more details. */ @@ -561,21 +565,24 @@ export interface Access { /** * Allow to import data from Yandex Metrica and AppMetrica to the cluster. * - * See [Export data to Yandex Cloud](https://appmetrica.yandex.com/docs/cloud/index.html) for more details. + * See [AppMetrica documentation](https://appmetrica.yandex.com/docs/cloud/index.html) for more details. */ metrika: boolean; /** Allow access to cluster for Serverless. */ serverless: boolean; /** Allow access for DataTransfer */ dataTransfer: boolean; - /** Allow access for YandexQuery */ + /** Allow access for Query */ yandexQuery: boolean; } export interface CloudStorage { $type: "yandex.cloud.mdb.clickhouse.v1.CloudStorage"; - /** Whether to use Yandex Object Storage for storing ClickHouse data. */ + /** Whether to use Object Storage for storing ClickHouse data. 
*/ enabled: boolean; + moveFactor?: number; + dataCacheEnabled?: boolean; + dataCacheMaxSize?: number; } const baseCluster: object = { @@ -2356,6 +2363,30 @@ export const CloudStorage = { if (message.enabled === true) { writer.uint32(8).bool(message.enabled); } + if (message.moveFactor !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.moveFactor! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.dataCacheEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.dataCacheEnabled!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.dataCacheMaxSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.dataCacheMaxSize!, + }, + writer.uint32(34).fork() + ).ldelim(); + } return writer; }, @@ -2369,6 +2400,24 @@ export const CloudStorage = { case 1: message.enabled = reader.bool(); break; + case 2: + message.moveFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.dataCacheEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.dataCacheMaxSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2383,12 +2432,29 @@ export const CloudStorage = { object.enabled !== undefined && object.enabled !== null ? Boolean(object.enabled) : false; + message.moveFactor = + object.moveFactor !== undefined && object.moveFactor !== null + ? Number(object.moveFactor) + : undefined; + message.dataCacheEnabled = + object.dataCacheEnabled !== undefined && object.dataCacheEnabled !== null + ? Boolean(object.dataCacheEnabled) + : undefined; + message.dataCacheMaxSize = + object.dataCacheMaxSize !== undefined && object.dataCacheMaxSize !== null + ? Number(object.dataCacheMaxSize) + : undefined; return message; }, toJSON(message: CloudStorage): unknown { const obj: any = {}; message.enabled !== undefined && (obj.enabled = message.enabled); + message.moveFactor !== undefined && (obj.moveFactor = message.moveFactor); + message.dataCacheEnabled !== undefined && + (obj.dataCacheEnabled = message.dataCacheEnabled); + message.dataCacheMaxSize !== undefined && + (obj.dataCacheMaxSize = message.dataCacheMaxSize); return obj; }, @@ -2397,6 +2463,9 @@ export const CloudStorage = { ): CloudStorage { const message = { ...baseCloudStorage } as CloudStorage; message.enabled = object.enabled ?? false; + message.moveFactor = object.moveFactor ?? undefined; + message.dataCacheEnabled = object.dataCacheEnabled ?? undefined; + message.dataCacheMaxSize = object.dataCacheMaxSize ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts index aad41db8..98d9f23e 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts @@ -71,7 +71,7 @@ export interface ListClustersRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; /** @@ -124,7 +124,7 @@ export interface CreateClusterRequest { networkId: string; /** Name of the first shard in cluster. If not set, defaults to the value 'shard1'. 
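[Editorial sketch, not part of the generated patch: the new hybrid-storage knobs on CloudStorage shown above. The import path is a placeholder and the numeric values are illustrative.]

import { CloudStorage } from "./cluster"; // placeholder path to the generated ClickHouse module

// Enable hybrid storage and set the three new optional fields (move_factor,
// data_cache_enabled, data_cache_max_size); unset optional fields stay undefined.
const cloudStorage = CloudStorage.fromPartial({
  enabled: true,
  moveFactor: 0.1,                           // illustrative value
  dataCacheEnabled: true,
  dataCacheMaxSize: 10 * 1024 * 1024 * 1024, // 10 GiB, illustrative value
});
console.log(CloudStorage.toJSON(cloudStorage));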
*/ shardName: string; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** User security groups */ securityGroupIds: string[]; @@ -167,7 +167,7 @@ export interface UpdateClusterRequest { configSpec?: ConfigSpec; /** New name for the cluster. */ name: string; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** New maintenance window settings for the cluster. */ maintenanceWindow?: MaintenanceWindow; @@ -313,7 +313,7 @@ export interface RestoreClusterRequest { networkId: string; /** ID of the folder to create the ClickHouse cluster in. */ folderId: string; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** User security groups */ securityGroupIds: string[]; @@ -443,7 +443,7 @@ export interface ListClusterLogsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -594,7 +594,7 @@ export interface ListClusterOperationsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -627,7 +627,7 @@ export interface ListClusterBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterBackupsResponse.next_page_token] returned by a previous list request. + * [ListClusterBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -660,7 +660,7 @@ export interface ListClusterHostsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -780,7 +780,7 @@ export interface ListClusterShardsRequest { pageSize: number; /** * Page token. to get the next page of results, set [page_token] to the [ListClusterShardsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -904,7 +904,7 @@ export interface ListClusterShardGroupsRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListClusterShardGroupsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListClusterShardGroupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -1020,6 +1020,27 @@ export interface CreateClusterExternalDictionaryMetadata { clusterId: string; } +export interface UpdateClusterExternalDictionaryRequest { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterExternalDictionaryRequest"; + /** + * ID of the ClickHouse cluster to update the external dictionary for. + * To get the cluster ID, use a [List] request. + */ + clusterId: string; + /** Configuration of the external dictionary. 
*/ + externalDictionary?: ClickhouseConfig_ExternalDictionary; + /** Field mask that specifies which fields of the External Dictionary should be updated. */ + updateMask?: FieldMask; +} + +export interface UpdateClusterExternalDictionaryMetadata { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterExternalDictionaryMetadata"; + /** ID of the cluster for which an external dictionary is being updated. */ + clusterId: string; + /** Name of the external dictionary. */ + externalDictionaryName: string; +} + export interface DeleteClusterExternalDictionaryRequest { $type: "yandex.cloud.mdb.clickhouse.v1.DeleteClusterExternalDictionaryRequest"; /** @@ -1125,7 +1146,7 @@ export interface ShardConfigSpec_Clickhouse { resources?: Resources; /** * Relative weight of the shard considered when writing data to the cluster. - * For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/table_engines/distributed/). + * For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/operations/table_engines/distributed/). */ weight?: number; } @@ -7554,6 +7575,219 @@ messageTypeRegistry.set( CreateClusterExternalDictionaryMetadata ); +const baseUpdateClusterExternalDictionaryRequest: object = { + $type: + "yandex.cloud.mdb.clickhouse.v1.UpdateClusterExternalDictionaryRequest", + clusterId: "", +}; + +export const UpdateClusterExternalDictionaryRequest = { + $type: + "yandex.cloud.mdb.clickhouse.v1.UpdateClusterExternalDictionaryRequest" as const, + + encode( + message: UpdateClusterExternalDictionaryRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.externalDictionary !== undefined) { + ClickhouseConfig_ExternalDictionary.encode( + message.externalDictionary, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterExternalDictionaryRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterExternalDictionaryRequest, + } as UpdateClusterExternalDictionaryRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.externalDictionary = + ClickhouseConfig_ExternalDictionary.decode(reader, reader.uint32()); + break; + case 3: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterExternalDictionaryRequest { + const message = { + ...baseUpdateClusterExternalDictionaryRequest, + } as UpdateClusterExternalDictionaryRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.externalDictionary = + object.externalDictionary !== undefined && + object.externalDictionary !== null + ? ClickhouseConfig_ExternalDictionary.fromJSON( + object.externalDictionary + ) + : undefined; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? 
FieldMask.fromJSON(object.updateMask) + : undefined; + return message; + }, + + toJSON(message: UpdateClusterExternalDictionaryRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.externalDictionary !== undefined && + (obj.externalDictionary = message.externalDictionary + ? ClickhouseConfig_ExternalDictionary.toJSON(message.externalDictionary) + : undefined); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateClusterExternalDictionaryRequest { + const message = { + ...baseUpdateClusterExternalDictionaryRequest, + } as UpdateClusterExternalDictionaryRequest; + message.clusterId = object.clusterId ?? ""; + message.externalDictionary = + object.externalDictionary !== undefined && + object.externalDictionary !== null + ? ClickhouseConfig_ExternalDictionary.fromPartial( + object.externalDictionary + ) + : undefined; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterExternalDictionaryRequest.$type, + UpdateClusterExternalDictionaryRequest +); + +const baseUpdateClusterExternalDictionaryMetadata: object = { + $type: + "yandex.cloud.mdb.clickhouse.v1.UpdateClusterExternalDictionaryMetadata", + clusterId: "", + externalDictionaryName: "", +}; + +export const UpdateClusterExternalDictionaryMetadata = { + $type: + "yandex.cloud.mdb.clickhouse.v1.UpdateClusterExternalDictionaryMetadata" as const, + + encode( + message: UpdateClusterExternalDictionaryMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.externalDictionaryName !== "") { + writer.uint32(18).string(message.externalDictionaryName); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterExternalDictionaryMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterExternalDictionaryMetadata, + } as UpdateClusterExternalDictionaryMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.externalDictionaryName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterExternalDictionaryMetadata { + const message = { + ...baseUpdateClusterExternalDictionaryMetadata, + } as UpdateClusterExternalDictionaryMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.externalDictionaryName = + object.externalDictionaryName !== undefined && + object.externalDictionaryName !== null + ? 
String(object.externalDictionaryName) + : ""; + return message; + }, + + toJSON(message: UpdateClusterExternalDictionaryMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.externalDictionaryName !== undefined && + (obj.externalDictionaryName = message.externalDictionaryName); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateClusterExternalDictionaryMetadata { + const message = { + ...baseUpdateClusterExternalDictionaryMetadata, + } as UpdateClusterExternalDictionaryMetadata; + message.clusterId = object.clusterId ?? ""; + message.externalDictionaryName = object.externalDictionaryName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterExternalDictionaryMetadata.$type, + UpdateClusterExternalDictionaryMetadata +); + const baseDeleteClusterExternalDictionaryRequest: object = { $type: "yandex.cloud.mdb.clickhouse.v1.DeleteClusterExternalDictionaryRequest", @@ -8838,6 +9072,21 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates an external dictionary for the specified ClickHouse cluster. */ + updateExternalDictionary: { + path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateExternalDictionary", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterExternalDictionaryRequest) => + Buffer.from( + UpdateClusterExternalDictionaryRequest.encode(value).finish() + ), + requestDeserialize: (value: Buffer) => + UpdateClusterExternalDictionaryRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified external dictionary. */ deleteExternalDictionary: { path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteExternalDictionary", @@ -8946,6 +9195,11 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { CreateClusterExternalDictionaryRequest, Operation >; + /** Updates an external dictionary for the specified ClickHouse cluster. */ + updateExternalDictionary: handleUnaryCall< + UpdateClusterExternalDictionaryRequest, + Operation + >; /** Deletes the specified external dictionary. */ deleteExternalDictionary: handleUnaryCall< DeleteClusterExternalDictionaryRequest, @@ -9514,6 +9768,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates an external dictionary for the specified ClickHouse cluster. */ + updateExternalDictionary( + request: UpdateClusterExternalDictionaryRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateExternalDictionary( + request: UpdateClusterExternalDictionaryRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateExternalDictionary( + request: UpdateClusterExternalDictionaryRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Deletes the specified external dictionary. 
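/*
 * [Editor's illustrative sketch - not part of the generated patch.]
 * Shows how the new UpdateExternalDictionary RPC added above could be invoked through
 * the generated ClusterServiceClient. Only UpdateClusterExternalDictionaryRequest, its
 * fields and the client overload come from this patch; the import path, the way the
 * client instance is obtained, and the field-mask path string are assumptions.
 */
import {
  ClusterServiceClient,
  UpdateClusterExternalDictionaryRequest,
} from './cluster_service'; // adjust to the generated module's location in your build

function updateDictionary(client: ClusterServiceClient): void {
  const request = UpdateClusterExternalDictionaryRequest.fromPartial({
    clusterId: 'c9qexamplecluster', // placeholder ID; use a ClusterService.List request to find real ones
    externalDictionary: {
      name: 'my_dictionary',
      // structure, layout and the dictionary source are omitted here;
      // see ClickhouseConfig_ExternalDictionary in config/clickhouse.ts.
    },
    updateMask: { paths: ['external_dictionary'] }, // assumed field-mask path
  });

  client.updateExternalDictionary(request, (err, operation) => {
    if (err) throw err;
    console.log('started operation', operation.id); // the call returns a long-running Operation
  });
}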
*/ deleteExternalDictionary( request: DeleteClusterExternalDictionaryRequest, diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts index 4c0b0db6..88cf2599 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/config/clickhouse.ts @@ -11,7 +11,7 @@ export const protobufPackage = "yandex.cloud.mdb.clickhouse.v1.config"; /** * ClickHouse configuration options. Detailed description for each set of options - * is available in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/). + * is available in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/). * * Any options not listed here are not supported. */ @@ -21,22 +21,22 @@ export interface ClickhouseConfig { logLevel: ClickhouseConfig_LogLevel; /** * Settings for the MergeTree engine. - * See description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#merge_tree). + * See description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/#merge_tree). */ mergeTree?: ClickhouseConfig_MergeTree; /** * Compression settings for the ClickHouse cluster. - * See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#compression). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/#compression). */ compression: ClickhouseConfig_Compression[]; /** * Configuration of external dictionaries to be used by the ClickHouse cluster. - * See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts/). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts/). */ dictionaries: ClickhouseConfig_ExternalDictionary[]; /** * Settings for thinning Graphite data. - * See in-depth description in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/server_settings/settings/#server_settings-graphite_rollup). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/server_settings/settings/#server_settings-graphite_rollup). */ graphiteRollup: ClickhouseConfig_GraphiteRollup[]; kafka?: ClickhouseConfig_Kafka; @@ -414,12 +414,12 @@ export interface ClickhouseConfig_ExternalDictionary { name: string; /** * Set of attributes for the external dictionary. - * For in-depth description, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_structure/). + * For in-depth description, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/). */ structure?: ClickhouseConfig_ExternalDictionary_Structure; /** * Layout for storing the dictionary in memory. - * For in-depth description, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_layout/). + * For in-depth description, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/). */ layout?: ClickhouseConfig_ExternalDictionary_Layout; /** Fixed interval between dictionary updates. 
*/ @@ -468,7 +468,7 @@ export interface ClickhouseConfig_ExternalDictionary_MysqlSource { where: string; /** * Query for checking the dictionary status, to pull only updated data. - * For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_lifetime/). + * For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/). */ invalidateQuery: string; } @@ -543,7 +543,7 @@ export interface ClickhouseConfig_ExternalDictionary_PostgresqlSource { password: string; /** * Query for checking the dictionary status, to pull only updated data. - * For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_lifetime/). + * For more details, see [ClickHouse documentation on dictionaries](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_lifetime/). */ invalidateQuery: string; /** @@ -624,22 +624,22 @@ export interface ClickhouseConfig_ExternalDictionary_Structure { id?: ClickhouseConfig_ExternalDictionary_Structure_Id; /** * Composite key for the dictionary, containing of one or more key columns. - * For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_structure/#composite-key). + * For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/#composite-key). */ key?: ClickhouseConfig_ExternalDictionary_Structure_Key; /** * Field holding the beginning of the range for dictionaries with `RANGE_HASHED` layout. - * For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed). + * For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed). */ rangeMin?: ClickhouseConfig_ExternalDictionary_Structure_Attribute; /** * Field holding the end of the range for dictionaries with `RANGE_HASHED` layout. - * For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed). + * For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_layout/#range-hashed). */ rangeMax?: ClickhouseConfig_ExternalDictionary_Structure_Attribute; /** * Description of the fields available for database queries. - * For details, see [ClickHouse documentation](https://clickhouse.yandex/docs/en/query_language/dicts/external_dicts_dict_structure/#attributes). + * For details, see [ClickHouse documentation](https://clickhouse.com/docs/en/query_language/dicts/external_dicts_dict_structure/#attributes). */ attributes: ClickhouseConfig_ExternalDictionary_Structure_Attribute[]; } diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/database_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/database_service.ts index d6d14755..85fa9bbe 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/database_service.ts @@ -51,7 +51,7 @@ export interface ListDatabasesRequest { pageSize: number; /** * Page token. to get the next page of results, set [page_token] to the [ListDatabasesResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. 
*/ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model.ts index 8059e63f..699e31a9 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model.ts @@ -46,7 +46,7 @@ export interface MlModel { clusterId: string; /** Type of the model. */ type: MlModelType; - /** Model file URL. You can only use models stored in Yandex Object Storage. */ + /** Model file URL. You can only use models stored in Object Storage. */ uri: string; } diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model_service.ts index 83fb2baf..1477de13 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/ml_model_service.ts @@ -50,7 +50,7 @@ export interface ListMlModelsRequest { pageSize: number; /** * Page token. To get the next page of results, set `page_token` to the - * [ListMlModelsResponse.next_page_token] returned by a previous list request. + * [ListMlModelsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -81,7 +81,7 @@ export interface CreateMlModelRequest { mlModelName: string; /** Type of the model. */ type: MlModelType; - /** Model file URL. You can only use models stored in Yandex Object Storage. */ + /** Model file URL. You can only use models stored in Object Storage. */ uri: string; } @@ -104,7 +104,7 @@ export interface UpdateMlModelRequest { /** Name of the the model to update. */ mlModelName: string; updateMask?: FieldMask; - /** The new model file URL. You can only use models stored in Yandex Object Storage. */ + /** The new model file URL. You can only use models stored in Object Storage. */ uri: string; } diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.ts index 1167b294..eb856565 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/resource_preset_service.ts @@ -37,7 +37,7 @@ export interface ListResourcePresetsRequest { pageSize: number; /** * Page token. To get the next page of results, Set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts index 0157b1e2..fce18fd4 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/user.ts @@ -44,7 +44,7 @@ export interface UserSpec { /** * ClickHouse user settings. Supported settings are a limited subset of all settings - * described in [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/settings/). + * described in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/). */ export interface UserSettings { $type: "yandex.cloud.mdb.clickhouse.v1.UserSettings"; @@ -54,7 +54,7 @@ export interface UserSettings { * * **1**-only read data queries are allowed. * * **2**-read data and change settings queries are allowed. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/permissions-for-queries/#settings_readonly). 
+ * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/permissions-for-queries/#settings_readonly). */ readonly?: number; /** @@ -62,7 +62,7 @@ export interface UserSettings { * * Default value: **true**. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/permissions-for-queries/#settings_allow_ddl). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/permissions-for-queries/#settings_allow_ddl). */ allowDdl?: boolean; /** @@ -75,7 +75,7 @@ export interface UserSettings { * * You can use [select_sequential_consistency] setting to read the data written with write quorum. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-insert_quorum). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert_quorum). */ insertQuorum?: number; /** @@ -116,7 +116,7 @@ export interface UserSettings { * * Minimum value: **1000**, 1 second (default: **300000**, 300 seconds or 5 minutes). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-max_replica_delay_for_distributed_queries). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_replica_delay_for_distributed_queries). */ maxReplicaDelayForDistributedQueries?: number; /** @@ -126,7 +126,7 @@ export interface UserSettings { * * Default value: **true** (query forcing is enabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-fallback_to_stale_replicas_for_distributed_queries). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-fallback_to_stale_replicas_for_distributed_queries). */ fallbackToStaleReplicasForDistributedQueries?: boolean; /** @@ -136,13 +136,13 @@ export interface UserSettings { * * **1**-only wait for own execution (default). * * **2**-wait for all replicas. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/sql-reference/statements/alter/#synchronicity-of-alter-queries). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/alter/#synchronicity-of-alter-queries). */ replicationAlterPartitionsSync?: number; /** * Determine the behavior of distributed subqueries. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#distributed-product-mode). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#distributed-product-mode). */ distributedProductMode: UserSettings_DistributedProductMode; /** @@ -153,7 +153,7 @@ export interface UserSettings { * * Default value: **false** (memory saving mode is disabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/sql-reference/statements/select/group-by/#select-group-by-in-external-memory). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/select/group-by/#select-group-by-in-external-memory). 
*/ distributedAggregationMemoryEfficient?: boolean; /** Timeout for DDL queries, in milliseconds. */ @@ -165,7 +165,7 @@ export interface UserSettings { * * Default value: **false** (silent skipping is disabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-skip_unavailable_shards). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-skip_unavailable_shards). */ skipUnavailableShards?: boolean; /** @@ -177,7 +177,7 @@ export interface UserSettings { * * Default value: **false** (compilation is disabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#compile). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#compile). */ compile?: boolean; /** @@ -191,7 +191,7 @@ export interface UserSettings { * For all other values, compilation is asynchronous: the compilation process executes in a separate thread. * When a compiled part of query is ready, it will be used by ClickHouse for eligible queries, including the ones that are currently running. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#min-count-to-compile). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#min-count-to-compile). */ minCountToCompile?: number; /** @@ -226,7 +226,7 @@ export interface UserSettings { * * Value must be greater than **0** (default: **65536**). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#setting-max_block_size). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#setting-max_block_size). */ maxBlockSize?: number; /** @@ -249,7 +249,7 @@ export interface UserSettings { * * Value must be greater than **0** (default: **1048576**). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-max_insert_block_size). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_insert_block_size). */ maxInsertBlockSize?: number; /** @@ -273,7 +273,7 @@ export interface UserSettings { * * Default value: **false** (uncompressed cache is disabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#setting-use_uncompressed_cache). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#setting-use_uncompressed_cache). */ useUncompressedCache?: boolean; /** @@ -323,7 +323,7 @@ export interface UserSettings { * * When using aggregation in external memory, it is recommended to set the value of this setting twice as low as the [max_memory_usage] setting value (by default, the maximum memory usage is limited to ten gigabytes). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/sql-reference/statements/select/group-by/#select-group-by-in-external-memory). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/sql-reference/statements/select/group-by/#select-group-by-in-external-memory). 
* * See also: the [distributed_aggregation_memory_efficient] setting. */ @@ -363,7 +363,7 @@ export interface UserSettings { * * Minimal value and default value: **0** (the thread number is calculated automatically based on the number of physical CPU cores, no HyperThreading cores are taken into account). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-max_threads). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_threads). */ maxThreads?: number; /** @@ -377,7 +377,7 @@ export interface UserSettings { * * If you use [max_bytes_before_external_group_by] or [max_bytes_before_external_sort] setting, then it is recommended to set their values twice as low as [max_memory_usage] setting value. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#settings_max_memory_usage). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#settings_max_memory_usage). */ maxMemoryUsage?: number; /** @@ -407,7 +407,7 @@ export interface UserSettings { * * Default value: **false** (setting is disabled, query executes even if ClickHouse can't use index by date). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-force_index_by_date). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-force_index_by_date). */ forceIndexByDate?: boolean; /** @@ -416,7 +416,7 @@ export interface UserSettings { * * Default value: **false** (setting is disabled, query executes even if ClickHouse can't use index by primary key). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#force-primary-key). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#force-primary-key). */ forcePrimaryKey?: boolean; /** @@ -424,7 +424,7 @@ export interface UserSettings { * * Minimal value and default value: **0**, no limitation is set. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#max-rows-to-read). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-rows-to-read). */ maxRowsToRead?: number; /** @@ -434,7 +434,7 @@ export interface UserSettings { */ maxBytesToRead?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while reading the data. + * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while reading the data. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -448,7 +448,7 @@ export interface UserSettings { */ maxRowsToGroupBy?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing aggregation. 
+ * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing aggregation. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -470,7 +470,7 @@ export interface UserSettings { */ maxBytesToSort?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while sorting. + * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while sorting. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -491,7 +491,7 @@ export interface UserSettings { */ maxResultBytes?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while forming result. + * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while forming result. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -506,7 +506,7 @@ export interface UserSettings { /** Limits the maximum size of a hash table in bytes (uncompressed data) when using **DISTINCT**. */ maxBytesInDistinct?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing **DISCTINCT**. + * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing **DISCTINCT**. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -526,7 +526,7 @@ export interface UserSettings { */ maxBytesToTransfer?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing transfers. + * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) while doing transfers. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -540,7 +540,7 @@ export interface UserSettings { */ maxExecutionTime?: number; /** - * Determines the behavior on exceeding [limits](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) of execution time. + * Determines the behavior on exceeding [limits](https://clickhouse.com/docs/en/operations/settings/query-complexity/#restrictions-on-query-complexity) of execution time. * * * **throw**-abort query execution, return an error. * * **break**-stop query execution, return partial result. @@ -588,7 +588,7 @@ export interface UserSettings { * * Value must be greater than **0** (default: **262144**). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-max_query_size). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-max_query_size). 
*/ maxQuerySize?: number; /** @@ -603,7 +603,7 @@ export interface UserSettings { * Value must be greater than **0** (default: **1000**). * If a too small value is set, it may render ClickHouse unable to execute even simple queries. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#max-ast-depth). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-ast-depth). */ maxAstDepth?: number; /** @@ -615,7 +615,7 @@ export interface UserSettings { * Value must be greater than **0** (default: **50000**). * If a too small value is set, it may render ClickHouse unable to execute even simple queries. * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/query-complexity/#max-ast-elements). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/query-complexity/#max-ast-elements). */ maxAstElements?: number; /** @@ -642,11 +642,11 @@ export interface UserSettings { * For example, the stream parser is unable to parse a value that contains **now()** expression; therefore an **INSERT** query for this value will fail and no data will be inserted into a table. * With enabled SQL parser, this expression is parsed correctly: the **now()** expression will be parsed as SQL function, interpreted, and the current date and time will be inserted into the table as a result. * - * This setting has effect only if you use [Values](https://clickhouse.tech/docs/en/interfaces/formats/#data-format-values) format when inserting data. + * This setting has effect only if you use [Values](https://clickhouse.com/docs/en/interfaces/formats/#data-format-values) format when inserting data. * * Default value: **true** (SQL parser is enabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/operations/settings/settings/#settings-input_format_values_interpret_expressions). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/operations/settings/settings/#settings-input_format_values_interpret_expressions). */ inputFormatValuesInterpretExpressions?: boolean; /** @@ -727,7 +727,7 @@ export interface UserSettings { * * Default value: **false** (compression is disabled). * - * See in-depth description in [ClickHouse documentation](https://clickhouse.tech/docs/en/interfaces/http/). + * See in-depth description in [ClickHouse documentation](https://clickhouse.com/docs/en/interfaces/http/). */ enableHttpCompression?: boolean; /** @@ -1010,7 +1010,7 @@ export function userSettings_CountDistinctImplementationToJSON( /** * ClickHouse quota representation. Each quota associated with an user and limits it resource usage for an interval. - * See in-depth description [ClickHouse documentation](https://clickhouse.yandex/docs/en/operations/quotas/). + * See in-depth description [ClickHouse documentation](https://clickhouse.com/docs/en/operations/quotas/). 
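/*
 * [Editor's illustrative sketch - not part of the generated patch.]
 * A small UserSettings object illustrating the settings documented above, assuming the
 * generated UserSettings helper exposes the same fromPartial as the other messages in
 * this file. The values are examples chosen to match the doc comments, not defaults.
 */
import { UserSettings } from './user'; // generated yandex/cloud/mdb/clickhouse/v1/user.ts

const settings = UserSettings.fromPartial({
  readonly: 2,                    // 2 - read data and change-settings queries only
  allowDdl: false,                // forbid DDL queries for this user
  insertQuorum: 2,                // number of replicas that must acknowledge a synchronous INSERT
  maxThreads: 0,                  // 0 - thread count is chosen automatically
  maxMemoryUsage: 10_737_418_240, // 10 GiB per query on a single server
});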
*/ export interface UserQuota { $type: "yandex.cloud.mdb.clickhouse.v1.UserQuota"; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/user_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/user_service.ts index c7e65696..cc510595 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/user_service.ts @@ -55,7 +55,7 @@ export interface ListUsersRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster.ts index 06ab836a..f4734950 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster.ts @@ -52,7 +52,7 @@ export interface Cluster { status: Cluster_Status; /** User security groups */ securityGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts index 2c504528..068c21a0 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts @@ -63,7 +63,7 @@ export interface ListClustersRequest { pageSize: number; /** * Page token. - * To get the next page of results, set `page_token` to the [ListClustersResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set `page_token` to the [ListClustersResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** @@ -119,7 +119,7 @@ export interface CreateClusterRequest { networkId: string; /** User security groups */ securityGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; @@ -171,7 +171,7 @@ export interface UpdateClusterRequest { name: string; /** User security groups */ securityGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; @@ -234,7 +234,7 @@ export interface ListClusterLogsRequest { /** * Page token. * - * To get the next page of results, set `page_token` to the [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set `page_token` to the [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** @@ -447,7 +447,7 @@ export interface ListClusterOperationsRequest { /** * Page token. * - * To get the next page of results, set `page_token` to the [ListClusterOperationsResponse.next_page_token] returned by a previous list request. 
+ * To get the next page of results, set `page_token` to the [ListClusterOperationsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -482,7 +482,7 @@ export interface ListClusterHostsRequest { /** * Page token. * - * To get the next page of results, set `page_token` to the [ListClusterHostsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set `page_token` to the [ListClusterHostsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -748,7 +748,7 @@ export interface RestoreClusterRequest { networkId: string; /** User security groups */ securityGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.ts index 62794093..c1cd6933 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch.ts @@ -37,6 +37,18 @@ export interface ElasticsearchConfig7 { * See in-depth description in [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-fielddata.html). */ fielddataCacheSize: string; + /** + * Remote hosts for reindex have to be explicitly allowed in elasticsearch.yml using the reindex.remote.whitelist property. + * It can be set to a comma delimited list of allowed remote host and port combinations. + * Scheme is ignored, only the host and port are used. + */ + reindexRemoteWhitelist: string; + /** + * List of paths to PEM encoded certificate files that should be trusted. + * + * See in-depth description in [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html#reindex-ssl) + */ + reindexSslCaPath: string; } /** Elasticsearch 7.x data node configuration. */ @@ -53,6 +65,8 @@ export interface ElasticsearchConfigSet7 { const baseElasticsearchConfig7: object = { $type: "yandex.cloud.mdb.elasticsearch.v1.config.ElasticsearchConfig7", fielddataCacheSize: "", + reindexRemoteWhitelist: "", + reindexSslCaPath: "", }; export const ElasticsearchConfig7 = { @@ -72,6 +86,12 @@ export const ElasticsearchConfig7 = { if (message.fielddataCacheSize !== "") { writer.uint32(34).string(message.fielddataCacheSize); } + if (message.reindexRemoteWhitelist !== "") { + writer.uint32(50).string(message.reindexRemoteWhitelist); + } + if (message.reindexSslCaPath !== "") { + writer.uint32(58).string(message.reindexSslCaPath); + } return writer; }, @@ -94,6 +114,12 @@ export const ElasticsearchConfig7 = { case 4: message.fielddataCacheSize = reader.string(); break; + case 6: + message.reindexRemoteWhitelist = reader.string(); + break; + case 7: + message.reindexSslCaPath = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -113,6 +139,15 @@ export const ElasticsearchConfig7 = { object.fielddataCacheSize !== null ? String(object.fielddataCacheSize) : ""; + message.reindexRemoteWhitelist = + object.reindexRemoteWhitelist !== undefined && + object.reindexRemoteWhitelist !== null + ? 
String(object.reindexRemoteWhitelist) + : ""; + message.reindexSslCaPath = + object.reindexSslCaPath !== undefined && object.reindexSslCaPath !== null + ? String(object.reindexSslCaPath) + : ""; return message; }, @@ -122,6 +157,10 @@ export const ElasticsearchConfig7 = { (obj.maxClauseCount = message.maxClauseCount); message.fielddataCacheSize !== undefined && (obj.fielddataCacheSize = message.fielddataCacheSize); + message.reindexRemoteWhitelist !== undefined && + (obj.reindexRemoteWhitelist = message.reindexRemoteWhitelist); + message.reindexSslCaPath !== undefined && + (obj.reindexSslCaPath = message.reindexSslCaPath); return obj; }, @@ -131,6 +170,8 @@ export const ElasticsearchConfig7 = { const message = { ...baseElasticsearchConfig7 } as ElasticsearchConfig7; message.maxClauseCount = object.maxClauseCount ?? undefined; message.fielddataCacheSize = object.fielddataCacheSize ?? ""; + message.reindexRemoteWhitelist = object.reindexRemoteWhitelist ?? ""; + message.reindexSslCaPath = object.reindexSslCaPath ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts index 88c90fc8..6b556b9e 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts @@ -9,13 +9,13 @@ export interface Extension { $type: "yandex.cloud.mdb.elasticsearch.v1.Extension"; /** Name of the extension. */ name: string; - /** Extension unique ID */ + /** Unique ID of the extension. */ id: string; /** ID of the Elasticsearch cluster the extension belongs to. */ clusterId: string; - /** Extension version */ + /** Version of the extension. */ version: number; - /** Flag is extension active now */ + /** The flag shows whether the extension is active. */ active: boolean; } @@ -23,11 +23,9 @@ export interface ExtensionSpec { $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec"; /** Name of the extension. */ name: string; - /** - * URI of the zip arhive to create the new extension from. - * Currently only supports links that are stored in Yandex Object Storage. - */ + /** URI of the zip archive to create the new extension from. Currently only supports links that are stored in Object Storage. */ uri: string; + /** The flag shows whether to create the extension in disabled state. */ disabled: boolean; } diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts index 5cdf27a1..8362888d 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts @@ -21,26 +21,26 @@ export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; export interface GetExtensionRequest { $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest"; - /** Required. ID of the cluster. */ + /** ID of the cluster. */ clusterId: string; - /** Required. ID of the extension to return. */ + /** ID of the extension to return. */ extensionId: string; } export interface ListExtensionsRequest { $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest"; - /** Required. ID of the cluster to list extensions in. */ + /** ID of the cluster to list extensions in. */ clusterId: string; /** - * The maximum number of results per page that should be returned. 
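/*
 * [Editor's illustrative sketch - not part of the generated patch.]
 * Demonstrates the two reindex settings added to ElasticsearchConfig7 above. The field
 * names and the fromPartial helper come from this patch; the host name, the certificate
 * path and the fielddata cache value are placeholders.
 */
import { ElasticsearchConfig7 } from './config/elasticsearch'; // generated module patched above

const esConfig = ElasticsearchConfig7.fromPartial({
  fielddataCacheSize: '20%', // fielddata cache limit, relative to the heap
  // Comma-delimited host:port combinations allowed as reindex sources; the scheme is ignored.
  reindexRemoteWhitelist: 'old-cluster.example.net:9200',
  // PEM-encoded CA certificates trusted when reindexing from the remote host over TLS.
  reindexSslCaPath: '/etc/elasticsearch/certs/remote-ca.pem',
});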
If the number of available - * results is larger than `page_size`, the service returns a `next_page_token` that can be used - * to get the next page of results in subsequent ListBackups requests. - * Acceptable values are 0 to 1000, inclusive. Default value: 100. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListExtensionsResponse.next_page_token] that can be used to get the next page of results in subsequent [ExtensionService.List] requests. */ pageSize: number; /** - * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups - * request to get the next page of results. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListExtensionsResponse.next_page_token] returned by the previous [ExtensionService.List] request. */ pageToken: string; } @@ -50,67 +50,66 @@ export interface ListExtensionsResponse { /** Requested list of extensions. */ extensions: Extension[]; /** - * This token allows you to get the next page of results for ListBackups requests, - * if the number of results is larger than `page_size` specified in the request. - * To get the next page, specify the value of `next_page_token` as a value for - * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups - * requests will have their own `next_page_token` to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListExtensionsRequest.page_size], use the [next_page_token] as the value for the [ListExtensionsRequest.page_token] in the subsequent [ExtensionService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [ExtensionService.List] requests should use the [next_page_token] value returned in the previous request to continue paging through the results. */ nextPageToken: string; } export interface DeleteExtensionRequest { $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest"; - /** Required. ID of the cluster. */ + /** ID of the cluster. */ clusterId: string; - /** Required. ID of the extension to delete. */ + /** ID of the extension to delete. */ extensionId: string; } export interface DeleteExtensionMetadata { $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata"; - /** Required. ID of the cluster. */ + /** ID of the cluster. */ clusterId: string; - /** Required. ID of the extension to delete. */ + /** ID of the extension to delete. */ extensionId: string; } export interface UpdateExtensionRequest { $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest"; - /** Required. ID of the cluster. */ + /** ID of the cluster. */ clusterId: string; - /** Required. ID of the extension to delete. */ + /** ID of the extension to update. */ extensionId: string; + /** The flag shows whether to make the extension active. */ active: boolean; } export interface UpdateExtensionMetadata { $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata"; - /** Required. ID of the cluster. */ + /** ID of the cluster. */ clusterId: string; - /** Required. ID of the extension. */ + /** ID of the extension. */ extensionId: string; } export interface CreateExtensionRequest { $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest"; - /** Required. ID of the cluster. */ + /** ID of the cluster. 
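/*
 * [Editor's illustrative sketch - not part of the generated patch.]
 * The paging contract restated above (page_size, page_token, next_page_token) is shared
 * by the list methods in this patch. This sketch threads the token through successive
 * ListExtensions calls; the promise-returning `list` callback stands in for a real
 * ExtensionService client call and is an assumption, and fromPartial is assumed to be
 * generated for the request as for the other messages. Only the request/response shapes
 * come from this file.
 */
import {
  ListExtensionsRequest,
  ListExtensionsResponse,
} from './extension_service'; // generated module patched above

async function listAllExtensions(
  list: (request: ListExtensionsRequest) => Promise<ListExtensionsResponse>,
  clusterId: string
): Promise<ListExtensionsResponse['extensions']> {
  const extensions: ListExtensionsResponse['extensions'] = [];
  let pageToken = '';
  do {
    const response = await list(
      ListExtensionsRequest.fromPartial({ clusterId, pageSize: 100, pageToken })
    );
    extensions.push(...response.extensions);
    pageToken = response.nextPageToken; // an empty token means this was the last page
  } while (pageToken !== '');
  return extensions;
}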
*/ clusterId: string; /** Name of the extension. */ name: string; - /** - * URI of the zip arhive to create the new extension from. - * Currently only supports links that are stored in Yandex Object Storage. - */ + /** URI of the zip archive to create the new extension from. Currently only supports links that are stored in Object Storage. */ uri: string; + /** The flag that disables the extension. */ disabled: boolean; } export interface CreateExtensionMetadata { $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata"; - /** Required. ID of the cluster. */ + /** ID of the cluster. */ clusterId: string; - /** Required. ID of the extension. */ + /** ID of the extension. */ extensionId: string; } diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/resource_preset_service.ts index 3f577953..df4df75c 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/resource_preset_service.ts @@ -39,7 +39,7 @@ export interface ListResourcePresetsRequest { /** * Page token. * - * To get the next page of results, set `page_token` to the [ListResourcePresetsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set `page_token` to the [ListResourcePresetsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/user_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/user_service.ts index c889f48d..592c89fa 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/user_service.ts @@ -56,7 +56,7 @@ export interface ListUsersRequest { /** * Page token. * - * To get the next page of results, set `page_token` to the [ListUsersResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set `page_token` to the [ListUsersResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts index e6169ecf..aa40dd8f 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts @@ -12,16 +12,13 @@ export interface Backup { id: string; /** ID of the folder that the backup belongs to. */ folderId: string; - /** - * Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format - * (i.e. when the backup operation was completed). - */ + /** Time when the backup operation was completed. */ createdAt?: Date; - /** ID of the PostgreSQL cluster that the backup was created for. */ + /** ID of the Greenplum® cluster that the backup was created for. */ sourceClusterId: string; /** Time when the backup operation was started. */ startedAt?: Date; - /** Size of backup in bytes */ + /** Size of the backup in bytes. 
*/ size: number; } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts index 769482a2..570c23bd 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts @@ -20,25 +20,23 @@ export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; export interface GetBackupRequest { $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest"; - /** Required. ID of the backup to return. */ + /** ID of the backup to return. */ backupId: string; } export interface ListBackupsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest"; - /** Required. ID of the folder to list backups in. */ + /** ID of the folder to list backups in. */ folderId: string; /** - * The maximum number of results per page that should be returned. If the number of available - * results is larger than `page_size`, the service returns a `next_page_token` that can be used - * to get the next page of results in subsequent ListBackups requests. - * Acceptable values are 0 to 1000, inclusive. Default value: 100. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. + * + * Default value is 100. */ pageSize: number; - /** - * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups - * request to get the next page of results. - */ + /** The page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -47,11 +45,11 @@ export interface ListBackupsResponse { /** Requested list of backups. */ backups: Backup[]; /** - * This token allows you to get the next page of results for ListBackups requests, - * if the number of results is larger than `page_size` specified in the request. - * To get the next page, specify the value of `next_page_token` as a value for - * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups - * requests will have their own `next_page_token` to continue paging through the results. + * This token allows you to get the next page of results for a list request. + * + * If the number of results is larger than [ListBackupsRequest.page_size] specified in the request, use the [next_page_token] as the value for the [ListBackupsRequest.page_token] parameter in the next list request. + * + * Each subsequent ListBackups request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -288,6 +286,7 @@ export const ListBackupsResponse = { messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); +/** A set of methods for managing backups. */ export const BackupServiceService = { /** Returns the specified backup of Greenplum® cluster. */ get: { diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts index 664dca58..53bdc513 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts @@ -23,21 +23,21 @@ export interface Cluster { $type: "yandex.cloud.mdb.greenplum.v1.Cluster"; /** * ID of the Greenplum® cluster. - * This ID is assigned by Yandex Cloud at the time of cluster creation. 
+ * This ID is assigned by the platform at the moment of cluster creation. */ id: string; /** ID of the folder that the Greenplum® cluster belongs to. */ folderId: string; - /** Cluster creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Time when the cluster was created. */ createdAt?: Date; /** * Name of the Greenplum® cluster. - * The name is unique within the folder and is 1-63 characters long. + * The name is unique within the folder. */ name: string; /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Description of the Greenplum® cluster. 0-256 characters long. */ + /** Description of the Greenplum® cluster. */ description: string; /** Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 labels per resource. */ labels: { [key: string]: string }; @@ -61,7 +61,7 @@ export interface Cluster { health: Cluster_Health; /** Current state of the cluster. */ status: Cluster_Status; - /** Window of maintenance operations. */ + /** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. */ maintenanceWindow?: MaintenanceWindow; /** Maintenance operation planned at nearest [maintenance_window]. */ plannedOperation?: MaintenanceOperation; @@ -69,25 +69,19 @@ export interface Cluster { securityGroupIds: string[]; /** Owner user name. */ userName: string; - /** Whether or not cluster is protected from being deleted. */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; - /** Greenplum and Odyssey configuration; */ + /** Greenplum® and Odyssey® configuration. */ clusterConfig?: ClusterConfigSet; } export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, - /** - * PRODUCTION - Stable environment with a conservative update policy: - * only hotfixes are applied during regular maintenance. - */ + /** PRODUCTION - Stable environment with a conservative update policy: only hotfixes are applied during regular maintenance. */ PRODUCTION = 1, - /** - * PRESTABLE - Environment with more aggressive update policy: new versions - * are rolled out irrespective of backward compatibility. - */ + /** PRESTABLE - Environment with more aggressive update policy: new versions are rolled out irrespective of backward compatibility. */ PRESTABLE = 2, UNRECOGNIZED = -1, } @@ -178,7 +172,6 @@ export function cluster_HealthToJSON(object: Cluster_Health): string { } } -/** Current state of the cluster. */ export enum Cluster_Status { /** STATUS_UNKNOWN - Cluster state is unknown. */ STATUS_UNKNOWN = 0, @@ -265,7 +258,7 @@ export interface ClusterConfigSet { $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet"; greenplumConfigSet617?: Greenplumconfigset617 | undefined; greenplumConfigSet619?: Greenplumconfigset619 | undefined; - /** Odyssey pool settings */ + /** Odyssey® pool settings. */ pool?: ConnectionPoolerConfigSet; } @@ -280,7 +273,6 @@ export interface Monitoring { link: string; } -/** Greenplum® cluster configuration. */ export interface GreenplumConfig { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig"; /** Version of the Greenplum® server software. */ @@ -294,27 +286,23 @@ export interface GreenplumConfig { * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. */ zoneId: string; - /** - * ID of the subnet the cluster belongs to. 
This subnet should be a part - * of the cloud network the cluster belongs to (see [Cluster.network_id]). - */ + /** ID of the subnet the cluster belongs to. This subnet should be a part of the cloud network the cluster belongs to (see [Cluster.network_id]). */ subnetId: string; /** - * Whether or not the cluster has a public IP address. + * Determines whether the cluster has a public IP address. * * After the cluster has been created, this setting cannot be changed. */ assignPublicIp: boolean; } -/** Greenplum® cluster access options. */ export interface Access { $type: "yandex.cloud.mdb.greenplum.v1.Access"; - /** Allows data export from the cluster to Yandex DataLens. */ + /** Allows data export from the cluster to DataLens. */ dataLens: boolean; - /** Allows SQL queries to the cluster databases from the Yandex Cloud management console. */ + /** Allows SQL queries to the cluster databases from the management console. */ webSql: boolean; - /** Allow access for DataTransfer. */ + /** Allows access for DataTransfer. */ dataTransfer: boolean; } @@ -326,31 +314,32 @@ export interface GreenplumRestoreConfig { access?: Access; /** * ID of the availability zone where the host resides. + * * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. */ zoneId: string; /** - * ID of the subnet that the host should belong to. This subnet should be a part - * of the network that the cluster belongs to. + * ID of the subnet that the host should belong to. This subnet should be a part of the network that the cluster belongs to. * The ID of the network is set in the field [Cluster.network_id]. */ subnetId: string; /** - * Whether the host should get a public IP address on creation. + * Determines whether the host should get a public IP address on creation. + * + * After a host has been created, this setting cannot be changed. * - * After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign - * a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + * To remove an assigned public IP, or to assign a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. * * Possible values: - * * false - don't assign a public IP to the master hosts. - * * true - the master hosts should have a public IP address. + * * `false` - do not assign a public IP to the master host. + * * `true` - assign a public IP to the master host. */ assignPublicIp: boolean; } export interface RestoreResources { $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources"; - /** ID of the preset for computational resources available to a host (CPU, memory etc.). */ + /** ID of the preset for computational resources available to a host (CPU, memory, etc.). */ resourcePresetId: string; /** Volume of the storage available to a host. 
*/ diskSize: number; @@ -926,7 +915,7 @@ export const ClusterConfigSet = { if (message.pool !== undefined) { ConnectionPoolerConfigSet.encode( message.pool, - writer.uint32(34).fork() + writer.uint32(26).fork() ).ldelim(); } return writer; @@ -951,7 +940,7 @@ export const ClusterConfigSet = { reader.uint32() ); break; - case 4: + case 3: message.pool = ConnectionPoolerConfigSet.decode( reader, reader.uint32() diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts index 428b3da7..6d52150d 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts @@ -42,7 +42,8 @@ export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; export interface GetClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.GetClusterRequest"; /** - * ID of the Greenplum® Cluster resource to return. + * ID of the Greenplum® cluster resource to return. + * * To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; @@ -52,25 +53,27 @@ export interface ListClustersRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClustersRequest"; /** * ID of the folder to list Greenplum® clusters in. + * * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** * A filter expression that filters resources listed in the response. + * * The expression must specify: + * * 1. The field name. Currently you can only use filtering with the [Cluster.name] field. + * * 2. An `=` operator. + * * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-zA-Z0-9_-]+`. */ filter: string; @@ -78,13 +81,14 @@ export interface ListClustersRequest { export interface ListClustersResponse { $type: "yandex.cloud.mdb.greenplum.v1.ListClustersResponse"; - /** List of Greenplum Cluster resources. */ + /** List of Greenplum® cluster resources. */ clusters: Cluster[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value - * for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * This token allows you to get the next page of results for list requests. 
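Putting the ListClustersRequest paging and filter rules above together, a hedged sketch of a full listing loop might look like this. It assumes `list` exposes the same `(request, callback)` overload as the other unary methods in this file, that the client instance is constructed elsewhere (for example through the SDK session), and that the filter value is only an example satisfying the documented grammar.

import {
  ClusterServiceClient,
  ListClustersRequest,
  ListClustersResponse,
} from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';
import { Cluster } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster';

// Collects every matching cluster in a folder by following next_page_token
// until the service stops returning one.
async function listAllClusters(
  client: ClusterServiceClient,
  folderId: string
): Promise<Cluster[]> {
  const clusters: Cluster[] = [];
  let pageToken = '';
  do {
    const request = ListClustersRequest.fromPartial({
      folderId,
      pageSize: 100,
      // Filter grammar from the comment above: field name, `=`, double-quoted value.
      filter: 'name = "production-cluster"',
      pageToken,
    });
    const response = await new Promise<ListClustersResponse>((resolve, reject) =>
      client.list(request, (err, res) => (err ? reject(err) : resolve(res)))
    );
    clusters.push(...response.clusters);
    pageToken = response.nextPageToken;
  } while (pageToken !== '');
  return clusters;
}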
+ * + * If the number of results is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value for the [ListClustersRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -93,13 +97,13 @@ export interface CreateClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterRequest"; /** ID of the folder to create the Greenplum® cluster in. */ folderId: string; - /** Name of the Greenplum® cluster. The name must be unique within the folder. Maximum 63 characters. */ + /** Name of the Greenplum® cluster. The name must be unique within the folder. */ name: string; /** Description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project":"mvp" or "source":"dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. + * For example, `"project":"mvp"` or `"source":"dictionary"`. */ labels: { [key: string]: string }; /** Deployment environment of the Greenplum® cluster. */ @@ -118,18 +122,19 @@ export interface CreateClusterRequest { segmentHostCount: number; /** Owner user name. */ userName: string; - /** Owner user password. Must be 8-128 characters long */ + /** Owner user password. */ userPassword: string; /** ID of the network to create the cluster in. */ networkId: string; /** User security groups. */ securityGroupIds: string[]; - /** Whether or not cluster is protected from being deleted. */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; /** Host groups to place VMs of the cluster in. */ hostGroupIds: string[]; - /** Window of maintenance operations. */ + /** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. */ maintenanceWindow?: MaintenanceWindow; + /** Configuration of Greenplum® and Odyssey®. */ configSpec?: ConfigSpec; } @@ -139,12 +144,11 @@ export interface CreateClusterRequest_LabelsEntry { value: string; } -/** Configuration of greenplum and odyssey */ export interface ConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec"; greenplumConfig617?: Greenplumconfig617 | undefined; greenplumConfig619?: Greenplumconfig619 | undefined; - /** Odyssey pool settings */ + /** Odyssey® pool settings. */ pool?: ConnectionPoolerConfig; } @@ -157,36 +161,40 @@ export interface CreateClusterMetadata { export interface UpdateClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.UpdateClusterRequest"; /** - * ID of the Greenplum® Cluster resource to update. + * ID of the Greenplum® cluster resource to update. * To get the Greenplum® cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the Greenplum® Cluster resource should be updated. */ + /** Field mask that specifies which fields of the Greenplum® cluster resource should be updated. */ updateMask?: FieldMask; /** New description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project":"mvp" or "source":"dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. + * For example, `"project":"mvp"` or `"source":"dictionary"`. * - * The new set of labels will completely replace the old ones. 
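Since this patch adds `configSpec` to CreateClusterRequest, a request carrying both Greenplum® and Odyssey® settings could be assembled roughly as below. Only a subset of the request fields is shown, and every ID, name and credential is a placeholder.

import { CreateClusterRequest } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';
import { Cluster_Environment } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster';
import {
  ConnectionPoolerConfig_PoolMode,
  LogStatement,
} from './src/generated/yandex/cloud/mdb/greenplum/v1/config';

const createRequest = CreateClusterRequest.fromPartial({
  folderId: '<folder-id>',
  name: 'gp-demo',
  description: 'Demo Greenplum cluster',
  labels: { project: 'mvp' },
  environment: Cluster_Environment.PRESTABLE,
  networkId: '<network-id>',
  userName: 'gp_admin',
  userPassword: '<owner-password>',
  deletionProtection: false,
  // New in this patch: Greenplum®/Odyssey® settings travel in config_spec.
  configSpec: {
    greenplumConfig619: {
      maxConnections: 250,
      logStatement: LogStatement.DDL,
    },
    pool: {
      mode: ConnectionPoolerConfig_PoolMode.SESSION,
      size: 30,
      clientIdleTimeout: 600,
    },
  },
});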
To add a label, request the current - * set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + * The new set of labels completely replaces the old one. + * To add a label, request the current set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. */ labels: { [key: string]: string }; /** New name for the cluster. */ name: string; - /** Greenplum® cluster configuration. */ + /** The Greenplum® cluster configuration. */ config?: GreenplumConfig; /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfigSpec; /** Configuration of the Greenplum® segment subcluster. */ segmentConfig?: SegmentSubclusterConfigSpec; - /** Window of maintenance operations. */ + /** Owner user password. */ + userPassword: string; + /** The Greenplum® cluster maintenance window. Should be defined by either one of the two options. */ maintenanceWindow?: MaintenanceWindow; /** User security groups. */ securityGroupIds: string[]; - /** Whether or not cluster is protected from being deleted. */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; + /** Settings of the Greenplum® cluster. */ + configSpec?: ConfigSpec; } export interface UpdateClusterRequest_LabelsEntry { @@ -197,8 +205,29 @@ export interface UpdateClusterRequest_LabelsEntry { export interface UpdateClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.UpdateClusterMetadata"; - /** ID of the Greenplum® Cluster resource that is being updated. */ + /** ID of the Greenplum® cluster resource that is being updated. */ + clusterId: string; +} + +export interface AddClusterHostsMetadata { + $type: "yandex.cloud.mdb.greenplum.v1.AddClusterHostsMetadata"; + /** ID of the Greenplum Cluster resource that is being updated. */ + clusterId: string; +} + +export interface ExpandRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ExpandRequest"; + /** + * ID of the Greenplum Cluster resource to update. + * To get the Greenplum cluster ID, use a [ClusterService.List] request. + */ clusterId: string; + /** Number of hosts for add to the segment subcluster */ + segmentHostCount: number; + /** Number of segments per host to add */ + addSegmentsPerHostCount: number; + /** Redistribute duration, in seconds */ + duration: number; } export interface DeleteClusterRequest { @@ -248,18 +277,15 @@ export interface StopClusterMetadata { export interface ListClusterOperationsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterOperationsRequest"; - /** ID of the Greenplum® Cluster resource to list operations for. */ + /** ID of the Greenplum® cluster resource to list operations for. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. 
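The ExpandRequest message introduced in this hunk backs the new Expand operation. A minimal sketch of building such a request, with illustrative values and a placeholder cluster ID:

import { ExpandRequest } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';

// Add two segment hosts and one extra segment per host, allowing an hour
// for redistribution. ClusterService.Expand returns an Operation to track.
const expandRequest = ExpandRequest.fromPartial({
  clusterId: '<cluster-id>',
  segmentHostCount: 2,        // hosts to add to the segment subcluster
  addSegmentsPerHostCount: 1, // segments to add per host
  duration: 3600,             // redistribute duration, in seconds
});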
To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -268,10 +294,11 @@ export interface ListClusterOperationsResponse { /** List of Operation resources for the specified Greenplum® cluster. */ operations: Operation[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -280,19 +307,17 @@ export interface ListClusterHostsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterHostsRequest"; /** * ID of the Greenplum® cluster. + * * To get the Greenplum® cluster ID use a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -301,22 +326,21 @@ export interface ListClusterHostsResponse { /** Requested list of hosts for the cluster. */ hosts: Host[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterHostsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } -/** Configuration of the master subcluster. */ export interface MasterSubclusterConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfigSpec"; /** Resources allocated to Greenplum® master subcluster hosts. 
*/ resources?: Resources; } -/** Configuration of the segment subcluster. */ export interface SegmentSubclusterConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfigSpec"; /** Resources allocated to Greenplum® segment subcluster hosts. */ @@ -328,18 +352,20 @@ export interface ListClusterLogsResponse { /** Requested log records. */ logs: LogRecord[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. - * This value is interchangeable with the [StreamLogRecord.next_record_token] from StreamLogs method. + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. + * + * This value is interchangeable with the [StreamLogRecord.next_record_token] from [StreamLogs] method. */ nextPageToken: string; } export interface LogRecord { $type: "yandex.cloud.mdb.greenplum.v1.LogRecord"; - /** Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Time when the log was recorded. */ timestamp?: Date; /** Contents of the log record. */ message: { [key: string]: string }; @@ -355,48 +381,49 @@ export interface ListClusterLogsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest"; /** * ID of the Greenplum® cluster to request logs for. + * * To get the Greenplum® cluster ID, use a [ClusterService.List] request. */ clusterId: string; /** - * Columns from logs table to request. + * Columns from log table to request. * If no columns are specified, entire log records are returned. */ columnFilter: string[]; /** Type of the service to request logs about. */ serviceType: ListClusterLogsRequest_ServiceType; - /** Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Start timestamp for the logs request. */ fromTime?: Date; - /** End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** End timestamp for the logs request. */ toTime?: Date; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by the previous list request. 
*/ pageToken: string; - /** Always return `next_page_token`, even if the current page is empty. */ + /** The service always returns a [ListClusterLogsResponse.next_page_token], even if the current page is empty. */ alwaysNextPageToken: boolean; /** * A filter expression that filters resources listed in the response. + * * The expression must specify: - * 1. The field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], - * [LogRecord.logs.message.error_severity] (for `GREENPLUM` service) and [LogRecord.logs.message.level] (for `GREENPLUM_POOLER` service) fields. + * + * 1. A field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], [LogRecord.logs.message.error_severity] (for `GREENPLUM` service) and [LogRecord.logs.message.level] (for `GREENPLUM_POOLER` service) fields. + * * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. - * 3. The value. Must be 1-63 characters long and match the regular expression `^[a-z0-9.-]{1,61}$`. + * + * 3. A value. Must be 1-63 characters long and match the regular expression `^[a-z0-9.-]{1,61}$`. + * * Examples of a filter: - * * `message.hostname='node1.db.cloud.yandex.net'` - * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"` + * * `message.hostname='node1.db.cloud.yandex.net'`; + * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"`. */ filter: string; } -/** Type of the service to request logs about. */ export enum ListClusterLogsRequest_ServiceType { /** SERVICE_TYPE_UNSPECIFIED - Type is not specified. */ SERVICE_TYPE_UNSPECIFIED = 0, @@ -446,19 +473,17 @@ export interface ListClusterBackupsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest"; /** * ID of the Greenplum® cluster. + * * To get the Greenplum® cluster ID use a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -467,44 +492,53 @@ export interface StreamLogRecord { /** One of the requested log records. */ record?: LogRecord; /** - * This token allows you to continue streaming logs starting from the exact - * same record. To continue streaming, specify value of `next_record_token` - * as value for `record_token` parameter in the next StreamLogs request. - * This value is interchangeable with `next_page_token` from ListLogs method. + * This token allows you to continue streaming logs starting from the exact same record. 
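A ListClusterLogsRequest that follows the filter grammar and pagination rules described above could be built as follows. The time window, page size and filter value are examples only; `service_type` and `column_filter` are omitted because their accepted values are defined elsewhere in this file.

import { ListClusterLogsRequest } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';

// Fetch the last hour of logs, keeping only severe records from one host.
// The filter string is taken from the documented examples above.
const logsRequest = ListClusterLogsRequest.fromPartial({
  clusterId: '<cluster-id>',
  fromTime: new Date(Date.now() - 60 * 60 * 1000),
  toTime: new Date(),
  pageSize: 100,
  alwaysNextPageToken: true,
  filter:
    'message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"',
});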
+ * + * To do that, specify value of [next_record_token] as the value for [StreamLogs.record_token] parameter in the next [StreamLogs] request. + * + * This value is interchangeable with [ListLogs.next_page_token] from [ListLogs] method. */ nextRecordToken: string; } export interface StreamClusterLogsRequest { $type: "yandex.cloud.mdb.greenplum.v1.StreamClusterLogsRequest"; - /** Required. ID of the Greenplum cluster. */ + /** ID of the Greenplum® cluster. */ clusterId: string; - /** Columns from logs table to get in the response. */ + /** + * Columns from log table to get in the response. + * If no columns are specified, entire log records are returned. + */ columnFilter: string[]; + /** Type of the service to request logs about. */ serviceType: StreamClusterLogsRequest_ServiceType; /** Start timestamp for the logs request. */ fromTime?: Date; /** * End timestamp for the logs request. - * If this field is not set, all existing logs will be sent and then the new ones as - * they appear. In essence it has 'tail -f' semantics. + * + * If this field is not set, all existing logs are sent as well as the new ones as they appear. + * + * In essence it has `tail -f` semantics. */ toTime?: Date; - /** - * Record token. Set `record_token` to the `next_record_token` returned by a previous StreamLogs - * request to start streaming from next log record. - */ + /** Record token. Set [record_token] to the [StreamLogs.next_record_token] returned by the previous [StreamLogs] request to start streaming from the next log record. */ recordToken: string; /** * A filter expression that filters resources listed in the response. + * * The expression must specify: - * 1. The field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], - * [LogRecord.logs.message.error_severity] (for GREENPLUM service), [LogRecord.logs.message.level] (for POOLER service) fields. + * + * 1. A field name. Currently filtering can be applied to the [LogRecord.logs.message.hostname], [LogRecord.logs.message.error_severity] (for GREENPLUM service), [LogRecord.logs.message.level] (for POOLER service) fields. + * * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`. + * + * 3. A value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`. + * * Examples of a filter: - * `message.hostname='node1.db.cloud.yandex.net'` - * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"` + * + * * `message.hostname='node1.db.cloud.yandex.net'`; + * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"`. */ filter: string; } @@ -559,10 +593,11 @@ export interface ListClusterBackupsResponse { /** List of Greenplum® backups. */ backups: Backup[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * This token allows you to get the next page of results for list requests. 
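For the streaming variant described in this hunk, leaving `to_time` unset yields the documented `tail -f` behaviour, and `record_token` lets a consumer resume from the last `next_record_token` it saw. A hedged sketch with placeholder values:

import { StreamClusterLogsRequest } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';

const streamRequest = StreamClusterLogsRequest.fromPartial({
  clusterId: '<cluster-id>',
  fromTime: new Date(Date.now() - 10 * 60 * 1000),
  // toTime is intentionally left unset: existing records are sent first,
  // then new ones as they appear (`tail -f` semantics).
  recordToken: '', // or a previously saved StreamLogRecord.next_record_token
  filter: 'message.error_severity IN ("ERROR", "FATAL", "PANIC")',
});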
+ * + * If the number of results is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -571,6 +606,7 @@ export interface RestoreClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest"; /** * ID of the backup to create a cluster from. + * * To get the backup ID, use a [ClusterService.ListBackups] request. */ backupId: string; @@ -581,13 +617,13 @@ export interface RestoreClusterRequest { /** Description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. + * For example, "project":"mvp" or "source":"dictionary". */ labels: { [key: string]: string }; /** Deployment environment of the Greenplum® cluster. */ environment: Cluster_Environment; - /** Greenplum® cluster config */ + /** Greenplum® cluster config. */ config?: GreenplumRestoreConfig; /** Resources of the Greenplum® master subcluster. */ masterResources?: Resources; @@ -595,15 +631,15 @@ export interface RestoreClusterRequest { segmentResources?: Resources; /** ID of the network to create the cluster in. */ networkId: string; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; /** Host groups to place VMs of cluster on. */ hostGroupIds: string[]; - /** ID of placement group */ + /** ID of the placement group. */ placementGroupId: string; - /** Window of maintenance operations. */ + /** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. 
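A RestoreClusterRequest built from the fields shown in this hunk might look like the sketch below; only a subset of fields is set and every ID is a placeholder.

import { RestoreClusterRequest } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';
import { Cluster_Environment } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster';

const restoreRequest = RestoreClusterRequest.fromPartial({
  backupId: '<backup-id>', // from a ClusterService.ListBackups response
  description: 'Cluster restored from backup',
  environment: Cluster_Environment.PRODUCTION,
  networkId: '<network-id>',
  masterResources: {
    resourcePresetId: '<master-preset-id>',
    diskTypeId: '<disk-type-id>',
  },
  segmentResources: {
    resourcePresetId: '<segment-preset-id>',
    diskTypeId: '<disk-type-id>',
  },
  deletionProtection: true,
  placementGroupId: '<placement-group-id>',
});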
*/ maintenanceWindow?: MaintenanceWindow; } @@ -1370,7 +1406,7 @@ export const ConfigSpec = { if (message.pool !== undefined) { ConnectionPoolerConfig.encode( message.pool, - writer.uint32(34).fork() + writer.uint32(26).fork() ).ldelim(); } return writer; @@ -1395,7 +1431,7 @@ export const ConfigSpec = { reader.uint32() ); break; - case 4: + case 3: message.pool = ConnectionPoolerConfig.decode(reader, reader.uint32()); break; default: @@ -1536,6 +1572,7 @@ const baseUpdateClusterRequest: object = { clusterId: "", description: "", name: "", + userPassword: "", securityGroupIds: "", deletionProtection: false, }; @@ -1585,6 +1622,9 @@ export const UpdateClusterRequest = { writer.uint32(66).fork() ).ldelim(); } + if (message.userPassword !== "") { + writer.uint32(106).string(message.userPassword); + } if (message.maintenanceWindow !== undefined) { MaintenanceWindow.encode( message.maintenanceWindow, @@ -1597,6 +1637,9 @@ export const UpdateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(144).bool(message.deletionProtection); } + if (message.configSpec !== undefined) { + ConfigSpec.encode(message.configSpec, writer.uint32(154).fork()).ldelim(); + } return writer; }, @@ -1648,6 +1691,9 @@ export const UpdateClusterRequest = { reader.uint32() ); break; + case 13: + message.userPassword = reader.string(); + break; case 15: message.maintenanceWindow = MaintenanceWindow.decode( reader, @@ -1660,6 +1706,9 @@ export const UpdateClusterRequest = { case 18: message.deletionProtection = reader.bool(); break; + case 19: + message.configSpec = ConfigSpec.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1704,6 +1753,10 @@ export const UpdateClusterRequest = { object.segmentConfig !== undefined && object.segmentConfig !== null ? SegmentSubclusterConfigSpec.fromJSON(object.segmentConfig) : undefined; + message.userPassword = + object.userPassword !== undefined && object.userPassword !== null + ? String(object.userPassword) + : ""; message.maintenanceWindow = object.maintenanceWindow !== undefined && object.maintenanceWindow !== null @@ -1717,6 +1770,10 @@ export const UpdateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromJSON(object.configSpec) + : undefined; return message; }, @@ -1748,6 +1805,8 @@ export const UpdateClusterRequest = { (obj.segmentConfig = message.segmentConfig ? SegmentSubclusterConfigSpec.toJSON(message.segmentConfig) : undefined); + message.userPassword !== undefined && + (obj.userPassword = message.userPassword); message.maintenanceWindow !== undefined && (obj.maintenanceWindow = message.maintenanceWindow ? MaintenanceWindow.toJSON(message.maintenanceWindow) @@ -1759,6 +1818,10 @@ export const UpdateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? ConfigSpec.toJSON(message.configSpec) + : undefined); return obj; }, @@ -1793,6 +1856,7 @@ export const UpdateClusterRequest = { object.segmentConfig !== undefined && object.segmentConfig !== null ? SegmentSubclusterConfigSpec.fromPartial(object.segmentConfig) : undefined; + message.userPassword = object.userPassword ?? 
""; message.maintenanceWindow = object.maintenanceWindow !== undefined && object.maintenanceWindow !== null @@ -1800,6 +1864,10 @@ export const UpdateClusterRequest = { : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromPartial(object.configSpec) + : undefined; return message; }, }; @@ -1957,6 +2025,186 @@ export const UpdateClusterMetadata = { messageTypeRegistry.set(UpdateClusterMetadata.$type, UpdateClusterMetadata); +const baseAddClusterHostsMetadata: object = { + $type: "yandex.cloud.mdb.greenplum.v1.AddClusterHostsMetadata", + clusterId: "", +}; + +export const AddClusterHostsMetadata = { + $type: "yandex.cloud.mdb.greenplum.v1.AddClusterHostsMetadata" as const, + + encode( + message: AddClusterHostsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AddClusterHostsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseAddClusterHostsMetadata, + } as AddClusterHostsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AddClusterHostsMetadata { + const message = { + ...baseAddClusterHostsMetadata, + } as AddClusterHostsMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: AddClusterHostsMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): AddClusterHostsMetadata { + const message = { + ...baseAddClusterHostsMetadata, + } as AddClusterHostsMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AddClusterHostsMetadata.$type, AddClusterHostsMetadata); + +const baseExpandRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ExpandRequest", + clusterId: "", + segmentHostCount: 0, + addSegmentsPerHostCount: 0, + duration: 0, +}; + +export const ExpandRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ExpandRequest" as const, + + encode( + message: ExpandRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.segmentHostCount !== 0) { + writer.uint32(16).int64(message.segmentHostCount); + } + if (message.addSegmentsPerHostCount !== 0) { + writer.uint32(24).int64(message.addSegmentsPerHostCount); + } + if (message.duration !== 0) { + writer.uint32(32).int64(message.duration); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExpandRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseExpandRequest } as ExpandRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.segmentHostCount = longToNumber(reader.int64() as Long); + break; + case 3: + message.addSegmentsPerHostCount = longToNumber( + reader.int64() as Long + ); + break; + case 4: + message.duration = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExpandRequest { + const message = { ...baseExpandRequest } as ExpandRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.segmentHostCount = + object.segmentHostCount !== undefined && object.segmentHostCount !== null + ? Number(object.segmentHostCount) + : 0; + message.addSegmentsPerHostCount = + object.addSegmentsPerHostCount !== undefined && + object.addSegmentsPerHostCount !== null + ? Number(object.addSegmentsPerHostCount) + : 0; + message.duration = + object.duration !== undefined && object.duration !== null + ? Number(object.duration) + : 0; + return message; + }, + + toJSON(message: ExpandRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.segmentHostCount !== undefined && + (obj.segmentHostCount = Math.round(message.segmentHostCount)); + message.addSegmentsPerHostCount !== undefined && + (obj.addSegmentsPerHostCount = Math.round( + message.addSegmentsPerHostCount + )); + message.duration !== undefined && + (obj.duration = Math.round(message.duration)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExpandRequest { + const message = { ...baseExpandRequest } as ExpandRequest; + message.clusterId = object.clusterId ?? ""; + message.segmentHostCount = object.segmentHostCount ?? 0; + message.addSegmentsPerHostCount = object.addSegmentsPerHostCount ?? 0; + message.duration = object.duration ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ExpandRequest.$type, ExpandRequest); + const baseDeleteClusterRequest: object = { $type: "yandex.cloud.mdb.greenplum.v1.DeleteClusterRequest", clusterId: "", @@ -4273,7 +4521,7 @@ export const ClusterServiceService = { /** * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum® clusters, make a [List] request. + * To get the list of all available Greenplum® clusters, make a [List] request. */ get: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Get", @@ -4322,6 +4570,18 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Expands the specified Greenplum® cluster. */ + expand: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Expand", + requestStream: false, + responseStream: false, + requestSerialize: (value: ExpandRequest) => + Buffer.from(ExpandRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ExpandRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified Greenplum® cluster. 
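The UpdateClusterRequest codec changes a few hunks above put the new `user_password` and `config_spec` fields on field numbers 13 and 19. A small round-trip check, assuming the generated module is imported from the path in the diff header; the values are placeholders and fromPartial fills the remaining fields with defaults.

import { UpdateClusterRequest } from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';

const original = UpdateClusterRequest.fromPartial({
  clusterId: '<cluster-id>',
  description: 'rotate the owner password',
  userPassword: '<new-owner-password>',
  configSpec: { pool: { size: 50 } },
});

const bytes = UpdateClusterRequest.encode(original).finish(); // Uint8Array
const decoded = UpdateClusterRequest.decode(bytes);

// The fields added in this patch survive serialization.
console.assert(decoded.userPassword === original.userPassword);
console.assert(decoded.configSpec?.pool?.size === 50);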
*/ delete: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Delete", @@ -4413,7 +4673,7 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterLogsResponse.decode(value), }, - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** Same as [ListLogs] but using server-side streaming. Also allows for `tail -f` semantics. */ streamLogs: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/StreamLogs", requestStream: false, @@ -4426,7 +4686,7 @@ export const ClusterServiceService = { Buffer.from(StreamLogRecord.encode(value).finish()), responseDeserialize: (value: Buffer) => StreamLogRecord.decode(value), }, - /** Retrieves the list of available backups for the specified Greenplum cluster. */ + /** Retrieves a list of available backups for the specified Greenplum® cluster. */ listBackups: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/ListBackups", requestStream: false, @@ -4458,7 +4718,7 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { /** * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum® clusters, make a [List] request. + * To get the list of all available Greenplum® clusters, make a [List] request. */ get: handleUnaryCall; /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ @@ -4467,6 +4727,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { create: handleUnaryCall; /** Updates the specified Greenplum® cluster. */ update: handleUnaryCall; + /** Expands the specified Greenplum® cluster. */ + expand: handleUnaryCall; /** Deletes the specified Greenplum® cluster. */ delete: handleUnaryCall; /** Starts the specified Greenplum® cluster. */ @@ -4490,12 +4752,12 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { >; /** Retrieves logs for the specified Greenplum® cluster. */ listLogs: handleUnaryCall; - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** Same as [ListLogs] but using server-side streaming. Also allows for `tail -f` semantics. */ streamLogs: handleServerStreamingCall< StreamClusterLogsRequest, StreamLogRecord >; - /** Retrieves the list of available backups for the specified Greenplum cluster. */ + /** Retrieves a list of available backups for the specified Greenplum® cluster. */ listBackups: handleUnaryCall< ListClusterBackupsRequest, ListClusterBackupsResponse @@ -4508,7 +4770,7 @@ export interface ClusterServiceClient extends Client { /** * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum® clusters, make a [List] request. + * To get the list of all available Greenplum® clusters, make a [List] request. */ get( request: GetClusterRequest, @@ -4582,6 +4844,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Expands the specified Greenplum® cluster. 
*/ + expand( + request: ExpandRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + expand( + request: ExpandRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + expand( + request: ExpandRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Deletes the specified Greenplum® cluster. */ delete( request: DeleteClusterRequest, @@ -4730,7 +5008,7 @@ export interface ClusterServiceClient extends Client { response: ListClusterLogsResponse ) => void ): ClientUnaryCall; - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** Same as [ListLogs] but using server-side streaming. Also allows for `tail -f` semantics. */ streamLogs( request: StreamClusterLogsRequest, options?: Partial @@ -4740,7 +5018,7 @@ export interface ClusterServiceClient extends Client { metadata?: Metadata, options?: Partial ): ClientReadableStream; - /** Retrieves the list of available backups for the specified Greenplum cluster. */ + /** Retrieves a list of available backups for the specified Greenplum® cluster. */ listBackups( request: ListClusterBackupsRequest, callback: ( diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts index 8a25ddfc..ee62f1f2 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts @@ -6,11 +6,65 @@ import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; -/** A list of computational resources allocated to a host. */ +export enum LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + /** NONE - None statements are logged. */ + NONE = 1, + /** DDL - Logs all data definition commands like `CREATE`, `ALTER`, and `DROP`. Default value. */ + DDL = 2, + /** MOD - Logs all `DDL` statements, plus `INSERT`, `UPDATE`, `DELETE`, `TRUNCATE`, and `COPY FROM`. */ + MOD = 3, + /** ALL - Logs all statements. */ + ALL = 4, + UNRECOGNIZED = -1, +} + +export function logStatementFromJSON(object: any): LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "NONE": + return LogStatement.NONE; + case 2: + case "DDL": + return LogStatement.DDL; + case 3: + case "MOD": + return LogStatement.MOD; + case 4: + case "ALL": + return LogStatement.ALL; + case -1: + case "UNRECOGNIZED": + default: + return LogStatement.UNRECOGNIZED; + } +} + +export function logStatementToJSON(object: LogStatement): string { + switch (object) { + case LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case LogStatement.NONE: + return "NONE"; + case LogStatement.DDL: + return "DDL"; + case LogStatement.MOD: + return "MOD"; + case LogStatement.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + export interface Resources { $type: "yandex.cloud.mdb.greenplum.v1.Resources"; /** * ID of the preset for computational resources allocated to a host. + * * Available presets are listed in the [documentation](/docs/managed-greenplum/concepts/instance-types). */ resourcePresetId: string; @@ -20,24 +74,26 @@ export interface Resources { diskTypeId: string; } -/** Route server configuration. 
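The client-side streaming signature above returns a grpc-js ClientReadableStream, so log records can be consumed with the usual stream events. How the ClusterServiceClient instance is obtained (for example via the SDK session and service factory) is assumed and not shown here.

import {
  ClusterServiceClient,
  StreamClusterLogsRequest,
  StreamLogRecord,
} from './src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service';

function tailClusterLogs(client: ClusterServiceClient, clusterId: string): void {
  const stream = client.streamLogs(
    StreamClusterLogsRequest.fromPartial({ clusterId })
  );
  stream.on('data', (record: StreamLogRecord) => {
    // Persist next_record_token so the stream can be resumed after a disconnect.
    console.log(record.nextRecordToken, record.record?.message);
  });
  stream.on('error', (err: Error) => console.error('log stream failed:', err));
  stream.on('end', () => console.log('log stream closed'));
}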
*/ export interface ConnectionPoolerConfig { $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig"; /** Route server pool mode. */ mode: ConnectionPoolerConfig_PoolMode; /** * The number of servers in the server pool. Clients are placed in a wait queue when all servers are busy. + * * Set to zero to disable the limit. */ size?: number; /** - * Server pool idle timeout, in seconds. A server connection closes after it has been idle for the specified duration. + * Server pool idle timeout, in seconds. + * + * A server connection closes after being idle for the specified time. + * * Set to zero to disable the limit. */ clientIdleTimeout?: number; } -/** Route server pool mode. */ export enum ConnectionPoolerConfig_PoolMode { POOL_MODE_UNSPECIFIED = 0, /** SESSION - Assign server connection to a client until it disconnects. Default value. */ @@ -82,14 +138,12 @@ export function connectionPoolerConfig_PoolModeToJSON( } } -/** Configuration of the master subcluster. */ export interface MasterSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig"; /** Computational resources allocated to Greenplum® master subcluster hosts. */ resources?: Resources; } -/** Configuration of the segment subcluster. */ export interface SegmentSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig"; /** Computational resources allocated to Greenplum® segment subcluster hosts. */ @@ -98,124 +152,167 @@ export interface SegmentSubclusterConfig { export interface Greenplumconfig617 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17"; - /** Maximum number of inbound connections on master segment */ + /** Maximum number of inbound connections on master segment. */ maxConnections?: number; /** - * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. - * https://www.postgresql.org/docs/current/runtime-config-replication.html + * The maximum size of WAL files that replication slots are allowed to retain in the `pg_wal` directory at checkpoint time. + * + * More info in [PostgreSQL® documentation](https://www.postgresql.org/docs/current/runtime-config-replication.html). */ maxSlotWalKeepSize?: number; /** - * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. - * The default value is 0, which means a limit is not enforced. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment + * The maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. + * + * The default value is 0 (no limit). + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment). */ gpWorkfileLimitPerSegment?: number; /** - * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. - * The default value is 0, which means a limit is not enforced. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query + * The maximum disk size that an individual query is allowed to use for creating temporary spill files at each segment. + * + * The default value is 0 (no limit). + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query). 
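The Odyssey® pool settings documented earlier in this file (mode, pool size, idle timeout, with zero disabling a limit) can be captured in a ConnectionPoolerConfig like this; the numbers are illustrative.

import {
  ConnectionPoolerConfig,
  ConnectionPoolerConfig_PoolMode,
} from './src/generated/yandex/cloud/mdb/greenplum/v1/config';

// Bounded session pool: clients queue once 30 server connections are busy,
// and an idle server connection is closed after 10 minutes.
const boundedPool = ConnectionPoolerConfig.fromPartial({
  mode: ConnectionPoolerConfig_PoolMode.SESSION,
  size: 30,
  clientIdleTimeout: 600,
});

// Setting size and client_idle_timeout to zero disables both limits.
const unboundedPool = ConnectionPoolerConfig.fromPartial({
  mode: ConnectionPoolerConfig_PoolMode.SESSION,
  size: 0,
  clientIdleTimeout: 0,
});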
*/ gpWorkfileLimitPerQuery?: number; /** - * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. - * Spill files are created when executing a query that requires more memory than it is allocated. - * The current query is terminated when the limit is exceeded. - * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query - * Default value is 10000 + * The maximum number of temporary spill files allowed per query at each segment. + * + * Spill files, also known as workfiles, are created when a query requires more memory than there is allocated. + * + * The current query is terminated if the limit is exceeded. + * + * Set to zero to disable the limit. + * + * Master session reloads if the parameter changes. + * + * Default value is 10000. + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query). */ gpWorkfileLimitFilesPerQuery?: number; /** - * Sets the maximum number of transactions that can be in the "prepared" state simultaneously - * https://www.postgresql.org/docs/9.6/runtime-config-resource.html + * The maximum number of transactions that can be in the `prepared` state simultaneously. + * + * More info in [PostgreSQL® documentation](https://www.postgresql.org/docs/9.6/runtime-config-resource.html). */ maxPreparedTransactions?: number; /** - * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression + * Whether the spill files are compressed or not. + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression). */ gpWorkfileCompression?: boolean; } export interface Greenplumconfig619 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19"; - /** Maximum number of inbound connections on master segment */ + /** Maximum number of inbound connections on master segment. */ maxConnections?: number; /** - * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. - * https://www.postgresql.org/docs/current/runtime-config-replication.html + * The maximum size of WAL files that replication slots are allowed to retain in the `pg_wal` directory at checkpoint time. + * + * More info in [PostgreSQL® documentation](https://www.postgresql.org/docs/current/runtime-config-replication.html). */ maxSlotWalKeepSize?: number; /** - * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. - * The default value is 0, which means a limit is not enforced. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment + * The maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. + * + * The default value is 0 (no limit). + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment). */ gpWorkfileLimitPerSegment?: number; /** - * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. 
- * The default value is 0, which means a limit is not enforced. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query + * The maximum disk size that an individual query is allowed to use for creating temporary spill files at each segment. + * + * The default value is 0 (no limit). + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query). */ gpWorkfileLimitPerQuery?: number; /** - * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. - * Spill files are created when executing a query that requires more memory than it is allocated. - * The current query is terminated when the limit is exceeded. - * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query - * Default value is 10000 + * The maximum number of temporary spill files allowed per query at each segment. + * + * Spill files, also known as workfiles, are created when a query requires more memory than there is allocated. + * + * The current query is terminated if the limit is exceeded. + * + * Set to zero to disable the limit. + * + * Master session reloads if the parameter changes. + * + * Default value is 10000. + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query). */ gpWorkfileLimitFilesPerQuery?: number; /** - * Sets the maximum number of transactions that can be in the "prepared" state simultaneously - * https://www.postgresql.org/docs/9.6/runtime-config-resource.html + * The maximum number of transactions that can be in the `prepared` state simultaneously. + * + * More info in [PostgreSQL® documentation](https://www.postgresql.org/docs/9.6/runtime-config-resource.html). */ maxPreparedTransactions?: number; /** - * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression + * Whether the spill files are compressed or not. + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression). */ gpWorkfileCompression?: boolean; + /** + * The maximum memory limit for a query, in bytes. + * + * Helps to avoid out-of-memory errors on a segment host during query processing as a result of setting `statement_mem` too high. + * + * Taking into account the configuration of a single segment host, calculate [max_statement_mem] as follows: `seghost_physical_memory` / `average_number_concurrent_queries`. + * + * When changing both [max_statement_mem] and `statement_mem`, [max_statement_mem] must be changed first, or listed first in the `postgresql.conf` file. + * + * Default value is 2097152000 (2000 MB). + * + * More info in [Greenplum® documentation](https://greenplum.docs.pivotal.io/6-19/ref_guide/config_params/guc-list.html#max_statement_mem). + */ + maxStatementMem?: number; + /** + * Logged SQL statements. + * + * `PREPARE` and `EXPLAIN ANALYZE` statements are also logged if their contained command belongs to an appropriate type. + * + * More info in [Greenplum® documentation](https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_statement). 
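The `max_statement_mem` sizing rule quoted above (segment host physical memory divided by the average number of concurrent queries) and the new `log_statement` setting can be expressed directly; the memory figure and query count below are illustrative.

import {
  Greenplumconfig619,
  LogStatement,
} from './src/generated/yandex/cloud/mdb/greenplum/v1/config';

const seghostPhysicalMemory = 64 * 1024 ** 3; // 64 GiB segment host (example)
const averageConcurrentQueries = 16;

const userConfig = Greenplumconfig619.fromPartial({
  // seghost_physical_memory / average_number_concurrent_queries, as documented.
  maxStatementMem: Math.floor(seghostPhysicalMemory / averageConcurrentQueries),
  logStatement: LogStatement.MOD, // DDL plus data-modifying statements
});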
+ */ + logStatement: LogStatement; } +/** Configuration settings version 6.17 */ export interface Greenplumconfigset617 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17"; - /** - * Effective settings for a Greenplum (a combination of settings defined - * in [user_config] and [default_config]). - */ + /** Effective settings for a Greenplum® cluster (a combination of settings defined in [GreenplumConfigSet6_17.user_config] and [GreenplumConfigSet6_17.default_config]). */ effectiveConfig?: Greenplumconfig617; - /** User-defined settings for a Greenplum. */ + /** User-defined settings for a Greenplum® cluster. */ userConfig?: Greenplumconfig617; - /** Default configuration for a Greenplum. */ + /** Default configuration for a Greenplum® cluster. */ defaultConfig?: Greenplumconfig617; } +/** Configuration settings version 6.19 */ export interface Greenplumconfigset619 { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19"; - /** - * Effective settings for a Greenplum (a combination of settings defined - * in [user_config] and [default_config]). - */ + /** Effective settings for a Greenplum® cluster (a combination of settings defined in [GreenplumConfigSet6_19.user_config] and [GreenplumConfigSet6_19.default_config]). */ effectiveConfig?: Greenplumconfig619; - /** User-defined settings for a Greenplum. */ + /** User-defined settings for a Greenplum® cluster. */ userConfig?: Greenplumconfig619; - /** Default configuration for a Greenplum. */ + /** Default configuration for a Greenplum® cluster. */ defaultConfig?: Greenplumconfig619; } export interface ConnectionPoolerConfigSet { $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet"; - /** - * Effective settings for a odyssey (a combination of settings defined - * in [user_config] and [default_config]). - */ + /** Effective settings for an Odyssey® pooler (a combination of settings defined in [ConnectionPoolerConfigSet.user_config] and [ConnectionPoolerConfigSet.default_config]). */ effectiveConfig?: ConnectionPoolerConfig; - /** User-defined settings for a odyssey. */ + /** User-defined settings for an Odyssey® pooler. */ userConfig?: ConnectionPoolerConfig; - /** Default configuration for a odyssey. */ + /** Default configuration for an Odyssey® pooler. */ defaultConfig?: ConnectionPoolerConfig; } @@ -771,6 +868,7 @@ messageTypeRegistry.set(Greenplumconfig617.$type, Greenplumconfig617); const baseGreenplumconfig619: object = { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19", + logStatement: 0, }; export const Greenplumconfig619 = { @@ -840,6 +938,18 @@ export const Greenplumconfig619 = { writer.uint32(58).fork() ).ldelim(); } + if (message.maxStatementMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStatementMem!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(72).int32(message.logStatement); + } return writer; }, @@ -892,6 +1002,15 @@ export const Greenplumconfig619 = { reader.uint32() ).value; break; + case 8: + message.maxStatementMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.logStatement = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -936,6 +1055,14 @@ export const Greenplumconfig619 = { object.gpWorkfileCompression !== null ? Boolean(object.gpWorkfileCompression) : undefined; + message.maxStatementMem = + object.maxStatementMem !== undefined && object.maxStatementMem !== null + ? 
Number(object.maxStatementMem) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? logStatementFromJSON(object.logStatement) + : 0; return message; }, @@ -955,6 +1082,10 @@ export const Greenplumconfig619 = { (obj.maxPreparedTransactions = message.maxPreparedTransactions); message.gpWorkfileCompression !== undefined && (obj.gpWorkfileCompression = message.gpWorkfileCompression); + message.maxStatementMem !== undefined && + (obj.maxStatementMem = message.maxStatementMem); + message.logStatement !== undefined && + (obj.logStatement = logStatementToJSON(message.logStatement)); return obj; }, @@ -973,6 +1104,8 @@ export const Greenplumconfig619 = { message.maxPreparedTransactions = object.maxPreparedTransactions ?? undefined; message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; + message.maxStatementMem = object.maxStatementMem ?? undefined; + message.logStatement = object.logStatement ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts index 183b66de..cf8a637c 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts @@ -10,13 +10,14 @@ export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; export interface Host { $type: "yandex.cloud.mdb.greenplum.v1.Host"; /** - * Name of the Greenplum® host. The host name is assigned by Yandex Cloud at creation time and cannot be changed. - * 1-63 characters long. + * Name of the Greenplum® host. * - * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. + * The host name is assigned by the platform at creation time and cannot be changed. + * + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. */ name: string; - /** ID of the Greenplum® cluster. The ID is assigned by Yandex Cloud at creation time. */ + /** ID of the Greenplum® cluster. The ID is assigned by the platform at creation time. */ clusterId: string; /** ID of the availability zone the Greenplum® host belongs to. */ zoneId: string; @@ -28,7 +29,7 @@ export interface Host { health: Host_Health; /** ID of the subnet that the host belongs to. */ subnetId: string; - /** Whether or not a public IP is assigned to the host. */ + /** Determines whether a public IP is assigned to the host. */ assignPublicIp: boolean; } @@ -89,7 +90,7 @@ export enum Host_Health { DEAD = 2, /** DEGRADED - The host is working below capacity or not fully functional. */ DEGRADED = 3, - /** UNBALANCED - One or more segments are not in preferred role. */ + /** UNBALANCED - One or more segments are not in the preferred role. */ UNBALANCED = 4, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts index 41b0a76c..b2fb8d04 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts @@ -6,7 +6,6 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; -/** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. */ export interface MaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.MaintenanceWindow"; /** An any-time maintenance window. 
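// --- Editor's sketch (illustrative, not part of this patch) ------------------
// The Host_Health enum touched above distinguishes hosts whose segments are not
// in their preferred role. The import path is an assumption; the enum members
// (ALIVE, DEAD, DEGRADED, UNBALANCED) come from host.ts in this patch.
import { Host, Host_Health } from "./host"; // assumed location

function unbalancedHosts(hosts: Host[]): Host[] {
  // Hosts reporting UNBALANCED need a segment rebalance rather than a restart.
  return hosts.filter((h) => h.health === Host_Health.UNBALANCED);
}
// -----------------------------------------------------------------------------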
*/ @@ -31,12 +30,19 @@ export interface WeeklyMaintenanceWindow { export enum WeeklyMaintenanceWindow_WeekDay { WEEK_DAY_UNSPECIFIED = 0, + /** MON - Monday */ MON = 1, + /** TUE - Tuesday */ TUE = 2, + /** WED - Wednesday */ WED = 3, + /** THU - Thursday */ THU = 4, + /** FRI - Friday */ FRI = 5, + /** SAT - Saturday */ SAT = 6, + /** SUN - Sunday */ SUN = 7, UNRECOGNIZED = -1, } @@ -101,10 +107,9 @@ export function weeklyMaintenanceWindow_WeekDayToJSON( } } -/** The operation to perform during maintenance. */ export interface MaintenanceOperation { $type: "yandex.cloud.mdb.greenplum.v1.MaintenanceOperation"; - /** The description of the operation, 1-256 characters long. */ + /** The description of the operation. */ info: string; /** Delay time for the maintenance operation. */ delayedUntil?: Date; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts index e4bf3175..dcbba056 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts @@ -5,34 +5,32 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; -/** A preset of resources for hardware configuration of Greenplum hosts. */ +/** A preset of resources for hardware configuration of Greenplum® hosts. */ export interface ResourcePreset { $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset"; /** ID of the resource preset. */ id: string; /** IDs of availability zones where the resource preset is available. */ zoneIds: string[]; - /** Number of CPU cores for a Greenplum host created with the preset. */ + /** IDs of availability disk types available in the resource preset. */ + diskTypeIds: string[]; + /** Number of CPU cores for a Greenplum® host created with the preset. */ cores: number; - /** RAM volume for a Greenplum host created with the preset, in bytes. */ + /** RAM volume for a Greenplum® host created with the preset, in bytes. */ memory: number; - /** Host type */ + /** Host type. */ type: ResourcePreset_Type; - /** Min host count */ - minHostCount: number; - /** Max host count */ - maxHostCount: number; - /** The number of hosts must be divisible by host_count_divider */ + /** The number of hosts must be divisible by [host_count_divider]. */ hostCountDivider: number; - /** Max segment count in host (actual only for segment host) */ + /** Maximum number of segments in segment host. */ maxSegmentInHostCount: number; } export enum ResourcePreset_Type { TYPE_UNSPECIFIED = 0, - /** MASTER - Greenplum master host. */ + /** MASTER - Greenplum® master host. */ MASTER = 1, - /** SEGMENT - Greenplum segment host. */ + /** SEGMENT - Greenplum® segment host. 
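// --- Editor's sketch (illustrative, not part of this patch) ------------------
// The reshaped ResourcePreset above drops min/max host counts and instead
// exposes disk_type_ids, host_count_divider and max_segment_in_host_count.
// A minimal client-side validation could look like this; the import path and
// the helper name are assumptions, the field semantics come from the comments above.
import { ResourcePreset } from "./resource_preset"; // assumed location

function isValidSegmentLayout(
  preset: ResourcePreset,
  hostCount: number,
  segmentsPerHost: number
): boolean {
  return (
    hostCount % preset.hostCountDivider === 0 &&    // host count must be divisible
    segmentsPerHost <= preset.maxSegmentInHostCount // per-host segment cap
  );
}
// -----------------------------------------------------------------------------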
*/ SEGMENT = 2, UNRECOGNIZED = -1, } @@ -72,11 +70,10 @@ const baseResourcePreset: object = { $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset", id: "", zoneIds: "", + diskTypeIds: "", cores: 0, memory: 0, type: 0, - minHostCount: 0, - maxHostCount: 0, hostCountDivider: 0, maxSegmentInHostCount: 0, }; @@ -94,6 +91,9 @@ export const ResourcePreset = { for (const v of message.zoneIds) { writer.uint32(18).string(v!); } + for (const v of message.diskTypeIds) { + writer.uint32(82).string(v!); + } if (message.cores !== 0) { writer.uint32(24).int64(message.cores); } @@ -103,12 +103,6 @@ export const ResourcePreset = { if (message.type !== 0) { writer.uint32(40).int32(message.type); } - if (message.minHostCount !== 0) { - writer.uint32(48).int64(message.minHostCount); - } - if (message.maxHostCount !== 0) { - writer.uint32(56).int64(message.maxHostCount); - } if (message.hostCountDivider !== 0) { writer.uint32(64).int64(message.hostCountDivider); } @@ -123,6 +117,7 @@ export const ResourcePreset = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseResourcePreset } as ResourcePreset; message.zoneIds = []; + message.diskTypeIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -132,6 +127,9 @@ export const ResourcePreset = { case 2: message.zoneIds.push(reader.string()); break; + case 10: + message.diskTypeIds.push(reader.string()); + break; case 3: message.cores = longToNumber(reader.int64() as Long); break; @@ -141,12 +139,6 @@ export const ResourcePreset = { case 5: message.type = reader.int32() as any; break; - case 6: - message.minHostCount = longToNumber(reader.int64() as Long); - break; - case 7: - message.maxHostCount = longToNumber(reader.int64() as Long); - break; case 8: message.hostCountDivider = longToNumber(reader.int64() as Long); break; @@ -166,6 +158,7 @@ export const ResourcePreset = { message.id = object.id !== undefined && object.id !== null ? String(object.id) : ""; message.zoneIds = (object.zoneIds ?? []).map((e: any) => String(e)); + message.diskTypeIds = (object.diskTypeIds ?? []).map((e: any) => String(e)); message.cores = object.cores !== undefined && object.cores !== null ? Number(object.cores) @@ -178,14 +171,6 @@ export const ResourcePreset = { object.type !== undefined && object.type !== null ? resourcePreset_TypeFromJSON(object.type) : 0; - message.minHostCount = - object.minHostCount !== undefined && object.minHostCount !== null - ? Number(object.minHostCount) - : 0; - message.maxHostCount = - object.maxHostCount !== undefined && object.maxHostCount !== null - ? Number(object.maxHostCount) - : 0; message.hostCountDivider = object.hostCountDivider !== undefined && object.hostCountDivider !== null ? 
Number(object.hostCountDivider) @@ -206,14 +191,15 @@ export const ResourcePreset = { } else { obj.zoneIds = []; } + if (message.diskTypeIds) { + obj.diskTypeIds = message.diskTypeIds.map((e) => e); + } else { + obj.diskTypeIds = []; + } message.cores !== undefined && (obj.cores = Math.round(message.cores)); message.memory !== undefined && (obj.memory = Math.round(message.memory)); message.type !== undefined && (obj.type = resourcePreset_TypeToJSON(message.type)); - message.minHostCount !== undefined && - (obj.minHostCount = Math.round(message.minHostCount)); - message.maxHostCount !== undefined && - (obj.maxHostCount = Math.round(message.maxHostCount)); message.hostCountDivider !== undefined && (obj.hostCountDivider = Math.round(message.hostCountDivider)); message.maxSegmentInHostCount !== undefined && @@ -227,11 +213,10 @@ export const ResourcePreset = { const message = { ...baseResourcePreset } as ResourcePreset; message.id = object.id ?? ""; message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.diskTypeIds = object.diskTypeIds?.map((e) => e) || []; message.cores = object.cores ?? 0; message.memory = object.memory ?? 0; message.type = object.type ?? 0; - message.minHostCount = object.minHostCount ?? 0; - message.maxHostCount = object.maxHostCount ?? 0; message.hostCountDivider = object.hostCountDivider ?? 0; message.maxSegmentInHostCount = object.maxSegmentInHostCount ?? 0; return message; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts index ee362f83..70073eca 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts @@ -14,32 +14,39 @@ import { ServiceError, } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; -import { ResourcePreset } from "../../../../../yandex/cloud/mdb/greenplum/v1/resource_preset"; +import { + ResourcePreset_Type, + ResourcePreset, + resourcePreset_TypeFromJSON, + resourcePreset_TypeToJSON, +} from "../../../../../yandex/cloud/mdb/greenplum/v1/resource_preset"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; export interface GetResourcePresetRequest { $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest"; /** - * Required. ID of the resource preset to return. + * ID of the resource preset to return. + * * To get the resource preset ID, use a [ResourcePresetService.List] request. */ resourcePresetId: string; + /** Required. ResourcePreset type - master or segment. */ + type: ResourcePreset_Type; } export interface ListResourcePresetsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest"; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. 
To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] returned by the previous list request. */ pageToken: string; + /** Required. ResourcePreset type - master or segment. */ + type: ResourcePreset_Type; } export interface ListResourcePresetsResponse { @@ -47,10 +54,11 @@ export interface ListResourcePresetsResponse { /** List of resource presets. */ resourcePresets: ResourcePreset[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value - * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * This token allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value for the [ListResourcePresetsRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -58,6 +66,7 @@ export interface ListResourcePresetsResponse { const baseGetResourcePresetRequest: object = { $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest", resourcePresetId: "", + type: 0, }; export const GetResourcePresetRequest = { @@ -70,6 +79,9 @@ export const GetResourcePresetRequest = { if (message.resourcePresetId !== "") { writer.uint32(10).string(message.resourcePresetId); } + if (message.type !== 0) { + writer.uint32(16).int32(message.type); + } return writer; }, @@ -88,6 +100,9 @@ export const GetResourcePresetRequest = { case 1: message.resourcePresetId = reader.string(); break; + case 2: + message.type = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -104,6 +119,10 @@ export const GetResourcePresetRequest = { object.resourcePresetId !== undefined && object.resourcePresetId !== null ? String(object.resourcePresetId) : ""; + message.type = + object.type !== undefined && object.type !== null + ? resourcePreset_TypeFromJSON(object.type) + : 0; return message; }, @@ -111,6 +130,8 @@ export const GetResourcePresetRequest = { const obj: any = {}; message.resourcePresetId !== undefined && (obj.resourcePresetId = message.resourcePresetId); + message.type !== undefined && + (obj.type = resourcePreset_TypeToJSON(message.type)); return obj; }, @@ -121,6 +142,7 @@ export const GetResourcePresetRequest = { ...baseGetResourcePresetRequest, } as GetResourcePresetRequest; message.resourcePresetId = object.resourcePresetId ?? ""; + message.type = object.type ?? 
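// --- Editor's sketch (illustrative, not part of this patch) ------------------
// Both request messages above now carry a ResourcePreset_Type, so a preset is
// fetched either as a MASTER or a SEGMENT preset. The preset ID and the import
// paths below are hypothetical; fromPartial is the helper generated in this hunk.
import { GetResourcePresetRequest } from "./resource_preset_service"; // assumed
import { ResourcePreset_Type } from "./resource_preset";              // assumed

const request = GetResourcePresetRequest.fromPartial({
  resourcePresetId: "s2.medium",    // hypothetical preset ID
  type: ResourcePreset_Type.MASTER, // required: MASTER or SEGMENT
});
// -----------------------------------------------------------------------------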
0; return message; }, }; @@ -134,6 +156,7 @@ const baseListResourcePresetsRequest: object = { $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest", pageSize: 0, pageToken: "", + type: 0, }; export const ListResourcePresetsRequest = { @@ -149,6 +172,9 @@ export const ListResourcePresetsRequest = { if (message.pageToken !== "") { writer.uint32(26).string(message.pageToken); } + if (message.type !== 0) { + writer.uint32(32).int32(message.type); + } return writer; }, @@ -170,6 +196,9 @@ export const ListResourcePresetsRequest = { case 3: message.pageToken = reader.string(); break; + case 4: + message.type = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -190,6 +219,10 @@ export const ListResourcePresetsRequest = { object.pageToken !== undefined && object.pageToken !== null ? String(object.pageToken) : ""; + message.type = + object.type !== undefined && object.type !== null + ? resourcePreset_TypeFromJSON(object.type) + : 0; return message; }, @@ -198,6 +231,8 @@ export const ListResourcePresetsRequest = { message.pageSize !== undefined && (obj.pageSize = Math.round(message.pageSize)); message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.type !== undefined && + (obj.type = resourcePreset_TypeToJSON(message.type)); return obj; }, @@ -209,6 +244,7 @@ export const ListResourcePresetsRequest = { } as ListResourcePresetsRequest; message.pageSize = object.pageSize ?? 0; message.pageToken = object.pageToken ?? ""; + message.type = object.type ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/index.ts b/src/generated/yandex/cloud/mdb/index.ts index 0144703a..5a4cab3b 100644 --- a/src/generated/yandex/cloud/mdb/index.ts +++ b/src/generated/yandex/cloud/mdb/index.ts @@ -108,6 +108,8 @@ export * as mongodb_mongodb4_4 from './mongodb/v1/config/mongodb4_4' export * as mongodb_mongodb4_4_enterprise from './mongodb/v1/config/mongodb4_4_enterprise' export * as mongodb_mongodb5_0 from './mongodb/v1/config/mongodb5_0' export * as mongodb_mongodb5_0_enterprise from './mongodb/v1/config/mongodb5_0_enterprise' +export * as mongodb_mongodb6_0 from './mongodb/v1/config/mongodb6_0' +export * as mongodb_mongodb6_0_enterprise from './mongodb/v1/config/mongodb6_0_enterprise' export * as mysql_mysql5_7 from './mysql/v1/config/mysql5_7' export * as mysql_mysql8_0 from './mysql/v1/config/mysql8_0' export * as postgresql_host10 from './postgresql/v1/config/host10' @@ -117,7 +119,9 @@ export * as postgresql_host11_1c from './postgresql/v1/config/host11_1c' export * as postgresql_host12 from './postgresql/v1/config/host12' export * as postgresql_host12_1c from './postgresql/v1/config/host12_1c' export * as postgresql_host13 from './postgresql/v1/config/host13' +export * as postgresql_host13_1c from './postgresql/v1/config/host13_1c' export * as postgresql_host14 from './postgresql/v1/config/host14' +export * as postgresql_host14_1c from './postgresql/v1/config/host14_1c' export * as postgresql_host9_6 from './postgresql/v1/config/host9_6' export * as postgresql_postgresql10 from './postgresql/v1/config/postgresql10' export * as postgresql_postgresql10_1c from './postgresql/v1/config/postgresql10_1c' @@ -126,9 +130,14 @@ export * as postgresql_postgresql11_1c from './postgresql/v1/config/postgresql11 export * as postgresql_postgresql12 from './postgresql/v1/config/postgresql12' export * as postgresql_postgresql12_1c from './postgresql/v1/config/postgresql12_1c' export * as postgresql_postgresql13 from 
'./postgresql/v1/config/postgresql13' +export * as postgresql_postgresql13_1c from './postgresql/v1/config/postgresql13_1c' export * as postgresql_postgresql14 from './postgresql/v1/config/postgresql14' +export * as postgresql_postgresql14_1c from './postgresql/v1/config/postgresql14_1c' export * as postgresql_postgresql9_6 from './postgresql/v1/config/postgresql9_6' export * as redis_redis5_0 from './redis/v1/config/redis5_0' export * as redis_redis6_0 from './redis/v1/config/redis6_0' export * as redis_redis6_2 from './redis/v1/config/redis6_2' -export * as sqlserver_sqlserver2016sp2 from './sqlserver/v1/config/sqlserver2016sp2' \ No newline at end of file +export * as redis_redis7_0 from './redis/v1/config/redis7_0' +export * as sqlserver_sqlserver2016sp2 from './sqlserver/v1/config/sqlserver2016sp2' +export * as sqlserver_sqlserver2017 from './sqlserver/v1/config/sqlserver2017' +export * as sqlserver_sqlserver2019 from './sqlserver/v1/config/sqlserver2019' \ No newline at end of file diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts index acb38a43..71790be2 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts @@ -280,6 +280,7 @@ export interface ConfigSpec_Kafka { kafkaConfig21?: Kafkaconfig21 | undefined; kafkaConfig26?: Kafkaconfig26 | undefined; kafkaConfig28?: Kafkaconfig28 | undefined; + kafkaConfig3?: KafkaConfig3 | undefined; } export interface ConfigSpec_Zookeeper { @@ -369,6 +370,14 @@ export interface Kafkaconfig21 { numPartitions?: number; /** Default replication factor of the topic on the whole cluster */ defaultReplicationFactor?: number; + /** The largest record batch size allowed by Kafka. Default value: 1048588. */ + messageMaxBytes?: number; + /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */ + replicaFetchMaxBytes?: number; + /** A list of cipher suites. */ + sslCipherSuites: string[]; + /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ + offsetsRetentionMinutes?: number; } /** Kafka version 2.6 broker configuration. */ @@ -439,6 +448,14 @@ export interface Kafkaconfig26 { numPartitions?: number; /** Default replication factor of the topic on the whole cluster */ defaultReplicationFactor?: number; + /** The largest record batch size allowed by Kafka. Default value: 1048588. */ + messageMaxBytes?: number; + /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */ + replicaFetchMaxBytes?: number; + /** A list of cipher suites. */ + sslCipherSuites: string[]; + /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ + offsetsRetentionMinutes?: number; } /** Kafka version 2.8 broker configuration. */ @@ -509,6 +526,92 @@ export interface Kafkaconfig28 { numPartitions?: number; /** Default replication factor of the topic on the whole cluster */ defaultReplicationFactor?: number; + /** The largest record batch size allowed by Kafka. Default value: 1048588. */ + messageMaxBytes?: number; + /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */ + replicaFetchMaxBytes?: number; + /** A list of cipher suites. */ + sslCipherSuites: string[]; + /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ + offsetsRetentionMinutes?: number; +} + +/** Kafka version 3.x broker configuration. 
*/ +export interface KafkaConfig3 { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3"; + /** Cluster topics compression type. */ + compressionType: CompressionType; + /** + * The number of messages accumulated on a log partition before messages are flushed to disk. + * + * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.flush_messages] setting. + */ + logFlushIntervalMessages?: number; + /** + * The maximum time (in milliseconds) that a message in any topic is kept in memory before flushed to disk. + * If not set, the value of [log_flush_scheduler_interval_ms] is used. + * + * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.flush_ms] setting. + */ + logFlushIntervalMs?: number; + /** + * The frequency of checks (in milliseconds) for any logs that need to be flushed to disk. + * This check is done by the log flusher. + */ + logFlushSchedulerIntervalMs?: number; + /** + * Partition size limit; Kafka will discard old log segments to free up space if `delete` [TopicConfig3.cleanup_policy] is in effect. + * This setting is helpful if you need to control the size of a log due to limited disk space. + * + * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.retention_bytes] setting. + */ + logRetentionBytes?: number; + /** The number of hours to keep a log segment file before deleting it. */ + logRetentionHours?: number; + /** + * The number of minutes to keep a log segment file before deleting it. + * + * If not set, the value of [log_retention_hours] is used. + */ + logRetentionMinutes?: number; + /** + * The number of milliseconds to keep a log segment file before deleting it. + * + * If not set, the value of [log_retention_minutes] is used. + * + * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.retention_ms] setting. + */ + logRetentionMs?: number; + /** + * The maximum size of a single log file. + * + * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.segment_bytes] setting. + */ + logSegmentBytes?: number; + /** + * Should pre allocate file when create new segment? + * + * This is the global cluster-level setting that can be overridden on a topic level by using the [TopicConfig3.preallocate] setting. + */ + logPreallocate?: boolean; + /** The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used. */ + socketSendBufferBytes?: number; + /** The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used. */ + socketReceiveBufferBytes?: number; + /** Enable auto creation of topic on the server */ + autoCreateTopicsEnable?: boolean; + /** Default number of partitions per topic on the whole cluster */ + numPartitions?: number; + /** Default replication factor of the topic on the whole cluster */ + defaultReplicationFactor?: number; + /** The largest record batch size allowed by Kafka. Default value: 1048588. */ + messageMaxBytes?: number; + /** The number of bytes of messages to attempt to fetch for each partition. Default value: 1048576. */ + replicaFetchMaxBytes?: number; + /** A list of cipher suites. */ + sslCipherSuites: string[]; + /** Offset storage time after a consumer group loses all its consumers. Default: 10080. */ + offsetsRetentionMinutes?: number; } /** Cluster host metadata. 
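// --- Editor's sketch (illustrative, not part of this patch) ------------------
// KafkaConfig3 plugs into ConfigSpec_Kafka as the new kafkaConfig3 member, next
// to the 2.1/2.6/2.8 variants. A partial spec for a 3.x broker could be built
// like this; the import path and concrete values are assumptions, the field
// names come from the interface above.
import { ConfigSpec_Kafka } from "./cluster"; // assumed location

const kafkaSpec = ConfigSpec_Kafka.fromPartial({
  kafkaConfig3: {
    messageMaxBytes: 1048588,                    // documented default
    replicaFetchMaxBytes: 1048576,               // documented default
    offsetsRetentionMinutes: 10080,              // documented default
    sslCipherSuites: ["TLS_AES_256_GCM_SHA384"], // hypothetical cipher suite
  },
});
// -----------------------------------------------------------------------------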
*/ @@ -1364,6 +1467,12 @@ export const ConfigSpec_Kafka = { writer.uint32(34).fork() ).ldelim(); } + if (message.kafkaConfig3 !== undefined) { + KafkaConfig3.encode( + message.kafkaConfig3, + writer.uint32(42).fork() + ).ldelim(); + } return writer; }, @@ -1386,6 +1495,9 @@ export const ConfigSpec_Kafka = { case 4: message.kafkaConfig28 = Kafkaconfig28.decode(reader, reader.uint32()); break; + case 5: + message.kafkaConfig3 = KafkaConfig3.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1412,6 +1524,10 @@ export const ConfigSpec_Kafka = { object.kafkaConfig_2_8 !== undefined && object.kafkaConfig_2_8 !== null ? Kafkaconfig28.fromJSON(object.kafkaConfig_2_8) : undefined; + message.kafkaConfig3 = + object.kafkaConfig_3 !== undefined && object.kafkaConfig_3 !== null + ? KafkaConfig3.fromJSON(object.kafkaConfig_3) + : undefined; return message; }, @@ -1433,6 +1549,10 @@ export const ConfigSpec_Kafka = { (obj.kafkaConfig_2_8 = message.kafkaConfig28 ? Kafkaconfig28.toJSON(message.kafkaConfig28) : undefined); + message.kafkaConfig3 !== undefined && + (obj.kafkaConfig_3 = message.kafkaConfig3 + ? KafkaConfig3.toJSON(message.kafkaConfig3) + : undefined); return obj; }, @@ -1456,6 +1576,10 @@ export const ConfigSpec_Kafka = { object.kafkaConfig28 !== undefined && object.kafkaConfig28 !== null ? Kafkaconfig28.fromPartial(object.kafkaConfig28) : undefined; + message.kafkaConfig3 = + object.kafkaConfig3 !== undefined && object.kafkaConfig3 !== null + ? KafkaConfig3.fromPartial(object.kafkaConfig3) + : undefined; return message; }, }; @@ -1625,6 +1749,7 @@ messageTypeRegistry.set(Resources.$type, Resources); const baseKafkaconfig21: object = { $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_1", compressionType: 0, + sslCipherSuites: "", }; export const Kafkaconfig21 = { @@ -1754,6 +1879,36 @@ export const Kafkaconfig21 = { writer.uint32(122).fork() ).ldelim(); } + if (message.messageMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.messageMaxBytes!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.replicaFetchMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaFetchMaxBytes!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + for (const v of message.sslCipherSuites) { + writer.uint32(146).string(v!); + } + if (message.offsetsRetentionMinutes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.offsetsRetentionMinutes!, + }, + writer.uint32(154).fork() + ).ldelim(); + } return writer; }, @@ -1761,6 +1916,7 @@ export const Kafkaconfig21 = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseKafkaconfig21 } as Kafkaconfig21; + message.sslCipherSuites = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1851,6 +2007,27 @@ export const Kafkaconfig21 = { reader.uint32() ).value; break; + case 16: + message.messageMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.replicaFetchMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.sslCipherSuites.push(reader.string()); + break; + case 19: + message.offsetsRetentionMinutes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -1931,6 +2108,23 @@ export const Kafkaconfig21 = { object.defaultReplicationFactor !== null ? Number(object.defaultReplicationFactor) : undefined; + message.messageMaxBytes = + object.messageMaxBytes !== undefined && object.messageMaxBytes !== null + ? Number(object.messageMaxBytes) + : undefined; + message.replicaFetchMaxBytes = + object.replicaFetchMaxBytes !== undefined && + object.replicaFetchMaxBytes !== null + ? Number(object.replicaFetchMaxBytes) + : undefined; + message.sslCipherSuites = (object.sslCipherSuites ?? []).map((e: any) => + String(e) + ); + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes !== undefined && + object.offsetsRetentionMinutes !== null + ? Number(object.offsetsRetentionMinutes) + : undefined; return message; }, @@ -1966,6 +2160,17 @@ export const Kafkaconfig21 = { (obj.numPartitions = message.numPartitions); message.defaultReplicationFactor !== undefined && (obj.defaultReplicationFactor = message.defaultReplicationFactor); + message.messageMaxBytes !== undefined && + (obj.messageMaxBytes = message.messageMaxBytes); + message.replicaFetchMaxBytes !== undefined && + (obj.replicaFetchMaxBytes = message.replicaFetchMaxBytes); + if (message.sslCipherSuites) { + obj.sslCipherSuites = message.sslCipherSuites.map((e) => e); + } else { + obj.sslCipherSuites = []; + } + message.offsetsRetentionMinutes !== undefined && + (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); return obj; }, @@ -1992,6 +2197,11 @@ export const Kafkaconfig21 = { message.numPartitions = object.numPartitions ?? undefined; message.defaultReplicationFactor = object.defaultReplicationFactor ?? undefined; + message.messageMaxBytes = object.messageMaxBytes ?? undefined; + message.replicaFetchMaxBytes = object.replicaFetchMaxBytes ?? undefined; + message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes ?? 
undefined; return message; }, }; @@ -2001,6 +2211,7 @@ messageTypeRegistry.set(Kafkaconfig21.$type, Kafkaconfig21); const baseKafkaconfig26: object = { $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_6", compressionType: 0, + sslCipherSuites: "", }; export const Kafkaconfig26 = { @@ -2130,6 +2341,36 @@ export const Kafkaconfig26 = { writer.uint32(122).fork() ).ldelim(); } + if (message.messageMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.messageMaxBytes!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.replicaFetchMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaFetchMaxBytes!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + for (const v of message.sslCipherSuites) { + writer.uint32(146).string(v!); + } + if (message.offsetsRetentionMinutes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.offsetsRetentionMinutes!, + }, + writer.uint32(154).fork() + ).ldelim(); + } return writer; }, @@ -2137,6 +2378,7 @@ export const Kafkaconfig26 = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseKafkaconfig26 } as Kafkaconfig26; + message.sslCipherSuites = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2227,6 +2469,27 @@ export const Kafkaconfig26 = { reader.uint32() ).value; break; + case 16: + message.messageMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.replicaFetchMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.sslCipherSuites.push(reader.string()); + break; + case 19: + message.offsetsRetentionMinutes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2307,6 +2570,23 @@ export const Kafkaconfig26 = { object.defaultReplicationFactor !== null ? Number(object.defaultReplicationFactor) : undefined; + message.messageMaxBytes = + object.messageMaxBytes !== undefined && object.messageMaxBytes !== null + ? Number(object.messageMaxBytes) + : undefined; + message.replicaFetchMaxBytes = + object.replicaFetchMaxBytes !== undefined && + object.replicaFetchMaxBytes !== null + ? Number(object.replicaFetchMaxBytes) + : undefined; + message.sslCipherSuites = (object.sslCipherSuites ?? []).map((e: any) => + String(e) + ); + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes !== undefined && + object.offsetsRetentionMinutes !== null + ? Number(object.offsetsRetentionMinutes) + : undefined; return message; }, @@ -2342,6 +2622,17 @@ export const Kafkaconfig26 = { (obj.numPartitions = message.numPartitions); message.defaultReplicationFactor !== undefined && (obj.defaultReplicationFactor = message.defaultReplicationFactor); + message.messageMaxBytes !== undefined && + (obj.messageMaxBytes = message.messageMaxBytes); + message.replicaFetchMaxBytes !== undefined && + (obj.replicaFetchMaxBytes = message.replicaFetchMaxBytes); + if (message.sslCipherSuites) { + obj.sslCipherSuites = message.sslCipherSuites.map((e) => e); + } else { + obj.sslCipherSuites = []; + } + message.offsetsRetentionMinutes !== undefined && + (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); return obj; }, @@ -2368,6 +2659,11 @@ export const Kafkaconfig26 = { message.numPartitions = object.numPartitions ?? 
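// --- Editor's sketch (illustrative, not part of this patch) ------------------
// The new 2.1 broker fields are optional int64 wrappers plus a repeated string,
// so they survive a binary encode/decode round trip through the generated codec
// (wire fields 16-19 added above). Import path and values are assumptions.
import { Kafkaconfig21 } from "./cluster"; // assumed location

const original = Kafkaconfig21.fromPartial({
  messageMaxBytes: 1048588,
  replicaFetchMaxBytes: 1048576,
  sslCipherSuites: ["TLS_AES_256_GCM_SHA384"], // hypothetical cipher suite
  offsetsRetentionMinutes: 10080,
});
const bytes = Kafkaconfig21.encode(original).finish(); // protobufjs Writer -> Uint8Array
const roundTripped = Kafkaconfig21.decode(bytes);      // restores the wrapped values
// -----------------------------------------------------------------------------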
undefined; message.defaultReplicationFactor = object.defaultReplicationFactor ?? undefined; + message.messageMaxBytes = object.messageMaxBytes ?? undefined; + message.replicaFetchMaxBytes = object.replicaFetchMaxBytes ?? undefined; + message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes ?? undefined; return message; }, }; @@ -2377,6 +2673,7 @@ messageTypeRegistry.set(Kafkaconfig26.$type, Kafkaconfig26); const baseKafkaconfig28: object = { $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig2_8", compressionType: 0, + sslCipherSuites: "", }; export const Kafkaconfig28 = { @@ -2506,6 +2803,36 @@ export const Kafkaconfig28 = { writer.uint32(122).fork() ).ldelim(); } + if (message.messageMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.messageMaxBytes!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.replicaFetchMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaFetchMaxBytes!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + for (const v of message.sslCipherSuites) { + writer.uint32(146).string(v!); + } + if (message.offsetsRetentionMinutes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.offsetsRetentionMinutes!, + }, + writer.uint32(154).fork() + ).ldelim(); + } return writer; }, @@ -2513,6 +2840,7 @@ export const Kafkaconfig28 = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseKafkaconfig28 } as Kafkaconfig28; + message.sslCipherSuites = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2603,6 +2931,27 @@ export const Kafkaconfig28 = { reader.uint32() ).value; break; + case 16: + message.messageMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.replicaFetchMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.sslCipherSuites.push(reader.string()); + break; + case 19: + message.offsetsRetentionMinutes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2683,6 +3032,23 @@ export const Kafkaconfig28 = { object.defaultReplicationFactor !== null ? Number(object.defaultReplicationFactor) : undefined; + message.messageMaxBytes = + object.messageMaxBytes !== undefined && object.messageMaxBytes !== null + ? Number(object.messageMaxBytes) + : undefined; + message.replicaFetchMaxBytes = + object.replicaFetchMaxBytes !== undefined && + object.replicaFetchMaxBytes !== null + ? Number(object.replicaFetchMaxBytes) + : undefined; + message.sslCipherSuites = (object.sslCipherSuites ?? []).map((e: any) => + String(e) + ); + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes !== undefined && + object.offsetsRetentionMinutes !== null + ? 
Number(object.offsetsRetentionMinutes) + : undefined; return message; }, @@ -2718,6 +3084,17 @@ export const Kafkaconfig28 = { (obj.numPartitions = message.numPartitions); message.defaultReplicationFactor !== undefined && (obj.defaultReplicationFactor = message.defaultReplicationFactor); + message.messageMaxBytes !== undefined && + (obj.messageMaxBytes = message.messageMaxBytes); + message.replicaFetchMaxBytes !== undefined && + (obj.replicaFetchMaxBytes = message.replicaFetchMaxBytes); + if (message.sslCipherSuites) { + obj.sslCipherSuites = message.sslCipherSuites.map((e) => e); + } else { + obj.sslCipherSuites = []; + } + message.offsetsRetentionMinutes !== undefined && + (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); return obj; }, @@ -2744,12 +3121,479 @@ export const Kafkaconfig28 = { message.numPartitions = object.numPartitions ?? undefined; message.defaultReplicationFactor = object.defaultReplicationFactor ?? undefined; + message.messageMaxBytes = object.messageMaxBytes ?? undefined; + message.replicaFetchMaxBytes = object.replicaFetchMaxBytes ?? undefined; + message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes ?? undefined; return message; }, }; messageTypeRegistry.set(Kafkaconfig28.$type, Kafkaconfig28); +const baseKafkaConfig3: object = { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3", + compressionType: 0, + sslCipherSuites: "", +}; + +export const KafkaConfig3 = { + $type: "yandex.cloud.mdb.kafka.v1.KafkaConfig3" as const, + + encode( + message: KafkaConfig3, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.compressionType !== 0) { + writer.uint32(8).int32(message.compressionType); + } + if (message.logFlushIntervalMessages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logFlushIntervalMessages!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.logFlushIntervalMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logFlushIntervalMs!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.logFlushSchedulerIntervalMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logFlushSchedulerIntervalMs!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.logRetentionBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logRetentionBytes!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.logRetentionHours !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logRetentionHours!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.logRetentionMinutes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logRetentionMinutes!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.logRetentionMs !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logRetentionMs! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.logSegmentBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logSegmentBytes!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.logPreallocate !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logPreallocate! 
}, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.socketSendBufferBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.socketSendBufferBytes!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.socketReceiveBufferBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.socketReceiveBufferBytes!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.autoCreateTopicsEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoCreateTopicsEnable!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.numPartitions !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.numPartitions! }, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.defaultReplicationFactor !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultReplicationFactor!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.messageMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.messageMaxBytes!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.replicaFetchMaxBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaFetchMaxBytes!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + for (const v of message.sslCipherSuites) { + writer.uint32(146).string(v!); + } + if (message.offsetsRetentionMinutes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.offsetsRetentionMinutes!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): KafkaConfig3 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseKafkaConfig3 } as KafkaConfig3; + message.sslCipherSuites = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.compressionType = reader.int32() as any; + break; + case 2: + message.logFlushIntervalMessages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.logFlushIntervalMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.logFlushSchedulerIntervalMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.logRetentionBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.logRetentionHours = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.logRetentionMinutes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.logRetentionMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.logSegmentBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.logPreallocate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.socketSendBufferBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.socketReceiveBufferBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.autoCreateTopicsEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + message.numPartitions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.defaultReplicationFactor = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.messageMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.replicaFetchMaxBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.sslCipherSuites.push(reader.string()); + break; + case 19: + message.offsetsRetentionMinutes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): KafkaConfig3 { + const message = { ...baseKafkaConfig3 } as KafkaConfig3; + message.compressionType = + object.compressionType !== undefined && object.compressionType !== null + ? compressionTypeFromJSON(object.compressionType) + : 0; + message.logFlushIntervalMessages = + object.logFlushIntervalMessages !== undefined && + object.logFlushIntervalMessages !== null + ? Number(object.logFlushIntervalMessages) + : undefined; + message.logFlushIntervalMs = + object.logFlushIntervalMs !== undefined && + object.logFlushIntervalMs !== null + ? Number(object.logFlushIntervalMs) + : undefined; + message.logFlushSchedulerIntervalMs = + object.logFlushSchedulerIntervalMs !== undefined && + object.logFlushSchedulerIntervalMs !== null + ? Number(object.logFlushSchedulerIntervalMs) + : undefined; + message.logRetentionBytes = + object.logRetentionBytes !== undefined && + object.logRetentionBytes !== null + ? Number(object.logRetentionBytes) + : undefined; + message.logRetentionHours = + object.logRetentionHours !== undefined && + object.logRetentionHours !== null + ? Number(object.logRetentionHours) + : undefined; + message.logRetentionMinutes = + object.logRetentionMinutes !== undefined && + object.logRetentionMinutes !== null + ? 
Number(object.logRetentionMinutes) + : undefined; + message.logRetentionMs = + object.logRetentionMs !== undefined && object.logRetentionMs !== null + ? Number(object.logRetentionMs) + : undefined; + message.logSegmentBytes = + object.logSegmentBytes !== undefined && object.logSegmentBytes !== null + ? Number(object.logSegmentBytes) + : undefined; + message.logPreallocate = + object.logPreallocate !== undefined && object.logPreallocate !== null + ? Boolean(object.logPreallocate) + : undefined; + message.socketSendBufferBytes = + object.socketSendBufferBytes !== undefined && + object.socketSendBufferBytes !== null + ? Number(object.socketSendBufferBytes) + : undefined; + message.socketReceiveBufferBytes = + object.socketReceiveBufferBytes !== undefined && + object.socketReceiveBufferBytes !== null + ? Number(object.socketReceiveBufferBytes) + : undefined; + message.autoCreateTopicsEnable = + object.autoCreateTopicsEnable !== undefined && + object.autoCreateTopicsEnable !== null + ? Boolean(object.autoCreateTopicsEnable) + : undefined; + message.numPartitions = + object.numPartitions !== undefined && object.numPartitions !== null + ? Number(object.numPartitions) + : undefined; + message.defaultReplicationFactor = + object.defaultReplicationFactor !== undefined && + object.defaultReplicationFactor !== null + ? Number(object.defaultReplicationFactor) + : undefined; + message.messageMaxBytes = + object.messageMaxBytes !== undefined && object.messageMaxBytes !== null + ? Number(object.messageMaxBytes) + : undefined; + message.replicaFetchMaxBytes = + object.replicaFetchMaxBytes !== undefined && + object.replicaFetchMaxBytes !== null + ? Number(object.replicaFetchMaxBytes) + : undefined; + message.sslCipherSuites = (object.sslCipherSuites ?? []).map((e: any) => + String(e) + ); + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes !== undefined && + object.offsetsRetentionMinutes !== null + ? 
Number(object.offsetsRetentionMinutes) + : undefined; + return message; + }, + + toJSON(message: KafkaConfig3): unknown { + const obj: any = {}; + message.compressionType !== undefined && + (obj.compressionType = compressionTypeToJSON(message.compressionType)); + message.logFlushIntervalMessages !== undefined && + (obj.logFlushIntervalMessages = message.logFlushIntervalMessages); + message.logFlushIntervalMs !== undefined && + (obj.logFlushIntervalMs = message.logFlushIntervalMs); + message.logFlushSchedulerIntervalMs !== undefined && + (obj.logFlushSchedulerIntervalMs = message.logFlushSchedulerIntervalMs); + message.logRetentionBytes !== undefined && + (obj.logRetentionBytes = message.logRetentionBytes); + message.logRetentionHours !== undefined && + (obj.logRetentionHours = message.logRetentionHours); + message.logRetentionMinutes !== undefined && + (obj.logRetentionMinutes = message.logRetentionMinutes); + message.logRetentionMs !== undefined && + (obj.logRetentionMs = message.logRetentionMs); + message.logSegmentBytes !== undefined && + (obj.logSegmentBytes = message.logSegmentBytes); + message.logPreallocate !== undefined && + (obj.logPreallocate = message.logPreallocate); + message.socketSendBufferBytes !== undefined && + (obj.socketSendBufferBytes = message.socketSendBufferBytes); + message.socketReceiveBufferBytes !== undefined && + (obj.socketReceiveBufferBytes = message.socketReceiveBufferBytes); + message.autoCreateTopicsEnable !== undefined && + (obj.autoCreateTopicsEnable = message.autoCreateTopicsEnable); + message.numPartitions !== undefined && + (obj.numPartitions = message.numPartitions); + message.defaultReplicationFactor !== undefined && + (obj.defaultReplicationFactor = message.defaultReplicationFactor); + message.messageMaxBytes !== undefined && + (obj.messageMaxBytes = message.messageMaxBytes); + message.replicaFetchMaxBytes !== undefined && + (obj.replicaFetchMaxBytes = message.replicaFetchMaxBytes); + if (message.sslCipherSuites) { + obj.sslCipherSuites = message.sslCipherSuites.map((e) => e); + } else { + obj.sslCipherSuites = []; + } + message.offsetsRetentionMinutes !== undefined && + (obj.offsetsRetentionMinutes = message.offsetsRetentionMinutes); + return obj; + }, + + fromPartial, I>>( + object: I + ): KafkaConfig3 { + const message = { ...baseKafkaConfig3 } as KafkaConfig3; + message.compressionType = object.compressionType ?? 0; + message.logFlushIntervalMessages = + object.logFlushIntervalMessages ?? undefined; + message.logFlushIntervalMs = object.logFlushIntervalMs ?? undefined; + message.logFlushSchedulerIntervalMs = + object.logFlushSchedulerIntervalMs ?? undefined; + message.logRetentionBytes = object.logRetentionBytes ?? undefined; + message.logRetentionHours = object.logRetentionHours ?? undefined; + message.logRetentionMinutes = object.logRetentionMinutes ?? undefined; + message.logRetentionMs = object.logRetentionMs ?? undefined; + message.logSegmentBytes = object.logSegmentBytes ?? undefined; + message.logPreallocate = object.logPreallocate ?? undefined; + message.socketSendBufferBytes = object.socketSendBufferBytes ?? undefined; + message.socketReceiveBufferBytes = + object.socketReceiveBufferBytes ?? undefined; + message.autoCreateTopicsEnable = object.autoCreateTopicsEnable ?? undefined; + message.numPartitions = object.numPartitions ?? undefined; + message.defaultReplicationFactor = + object.defaultReplicationFactor ?? undefined; + message.messageMaxBytes = object.messageMaxBytes ?? 
undefined; + message.replicaFetchMaxBytes = object.replicaFetchMaxBytes ?? undefined; + message.sslCipherSuites = object.sslCipherSuites?.map((e) => e) || []; + message.offsetsRetentionMinutes = + object.offsetsRetentionMinutes ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(KafkaConfig3.$type, KafkaConfig3); + const baseHost: object = { $type: "yandex.cloud.mdb.kafka.v1.Host", name: "", diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/kafka/v1/cluster_service.ts index dc081cb0..88afdb34 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/cluster_service.ts @@ -60,7 +60,7 @@ export interface ListClustersRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** Filter support is not currently implemented. Any filters are ignored. */ @@ -223,7 +223,7 @@ export interface ListClusterLogsRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** @@ -343,7 +343,7 @@ export interface ListClusterOperationsRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -378,7 +378,7 @@ export interface ListClusterHostsRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/connector.ts b/src/generated/yandex/cloud/mdb/kafka/v1/connector.ts index c9fc4c5e..119dfebe 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/connector.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/connector.ts @@ -6,23 +6,26 @@ import { Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.kafka.v1"; -/** An Apache Kafka® connector specification */ +/** + * An object that represents an Apache Kafka® connector. + * + * See [the documentation](/docs/managed-kafka/concepts/connectors) for details. + */ export interface ConnectorSpec { $type: "yandex.cloud.mdb.kafka.v1.ConnectorSpec"; /** Name of the connector. */ name: string; - /** - * Maximum number of connector tasks. - * Default is the number of brokers. - */ + /** Maximum number of connector tasks. Default value is the number of brokers. */ tasksMax?: number; /** - * Properties passed with connector config to Connect service. - * Example: 'sync.topics.config.enabled: true'. + * A set of properties passed to Managed Service for Apache Kafka® with the connector configuration. + * Example: `sync.topics.config.enabled: true`. 
*/ properties: { [key: string]: string }; - /** Configuration of MirrorMaker connector */ + /** Configuration of the MirrorMaker connector. */ connectorConfigMirrormaker?: ConnectorConfigMirrorMakerSpec | undefined; + /** Configuration of S3-Sink connector. */ + connectorConfigS3Sink?: ConnectorConfigS3SinkSpec | undefined; } export interface ConnectorSpec_PropertiesEntry { @@ -31,19 +34,19 @@ export interface ConnectorSpec_PropertiesEntry { value: string; } -/** An Apache Kafka® connector's update specification. */ export interface UpdateConnectorSpec { $type: "yandex.cloud.mdb.kafka.v1.UpdateConnectorSpec"; - /** Maximum number of tasks to update. */ + /** Maximum number of connector tasks to update. */ tasksMax?: number; /** - * Properties passed with connector config to Connect service, that - * we should change or add in existing Properties-set of connector. - * Example: 'sync.topics.config.enabled: false' + * A set of new or changed properties to update for the connector. They are passed with the connector configuration to Managed Service for Apache Kafka®. + * Example: `sync.topics.config.enabled: false`. */ properties: { [key: string]: string }; - /** Update specification for MirrorMaker. */ + /** Configuration of the MirrorMaker connector. */ connectorConfigMirrormaker?: ConnectorConfigMirrorMakerSpec | undefined; + /** Update specification for S3-Sink Connector. */ + connectorConfigS3Sink?: UpdateConnectorConfigS3SinkSpec | undefined; } export interface UpdateConnectorSpec_PropertiesEntry { @@ -52,122 +55,132 @@ export interface UpdateConnectorSpec_PropertiesEntry { value: string; } -/** - * An An Apache Kafka® MirrorMaker - * connector specification. - */ export interface ConnectorConfigMirrorMakerSpec { $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigMirrorMakerSpec"; - /** Source cluster configuration. */ + /** Source cluster configuration for the MirrorMaker connector. */ sourceCluster?: ClusterConnectionSpec; - /** Target cluster configuration. */ + /** Target cluster configuration for the MirrorMaker connector. */ targetCluster?: ClusterConnectionSpec; - /** List of Kafka topics, separated by ',' */ + /** List of Kafka topics, separated by `,`. */ topics: string; /** Replication factor for automatically created topics. */ replicationFactor?: number; } -/** - * Specification of ClusterConnection - - * connection to clusters, that - * are source or target of MirrorMaker - * clusters. - */ export interface ClusterConnectionSpec { $type: "yandex.cloud.mdb.kafka.v1.ClusterConnectionSpec"; /** - * Alias of ClusterConnection. - * For example: 'source', 'target', ... + * Alias of cluster connection configuration. + * Examples: `source`, `target`. */ alias: string; - /** - * If type is 'this_cluster' - we connect to - * cluster that is handle Kafka Connect Worker, - * on which we try to register connector. - */ + /** Connection configuration of the cluster the connector belongs to. As all credentials are already known, leave this parameter empty. */ thisCluster?: ThisClusterSpec | undefined; - /** - * If type is 'external_cluster' - we connect - * to cluster that is not handle Kafka Connect Worker, - * on which we try to register connector. - */ + /** Configuration of connection to an external cluster with all the necessary credentials. */ externalCluster?: ExternalClusterConnectionSpec | undefined; } -/** - * Specification of cluster_connection - * type 'this_cluster'. This means - * that we already have all credentials, - * so this spec is empty. 
- */ export interface ThisClusterSpec { $type: "yandex.cloud.mdb.kafka.v1.ThisClusterSpec"; } -/** - * Specification of connection to - * external cluster. It contains - * all necessary credentials to - * connect to external cluster. - */ export interface ExternalClusterConnectionSpec { $type: "yandex.cloud.mdb.kafka.v1.ExternalClusterConnectionSpec"; - /** - * List bootstrap servers of cluster, - * separated by ','. - */ + /** List of bootstrap servers of the cluster, separated by `,`. */ bootstrapServers: string; - /** - * Sasl username which - * we use to connect to cluster. - */ + /** SASL username to use for connection to the cluster. */ saslUsername: string; - /** - * Sasl password which we use - * to connect to cluster. - */ + /** SASL password to use for connection to the cluster. */ saslPassword: string; + /** SASL mechanism to use for connection to the cluster. */ + saslMechanism: string; + /** Security protocol to use for connection to the cluster. */ + securityProtocol: string; /** - * Sasl mechanism, which we - * should use to connect to cluster. + * CA in PEM format to connect to external cluster. + * Lines of certificate separated by '\n' symbol. */ - saslMechanism: string; + sslTruststoreCertificates: string; +} + +/** Specification for Kafka S3-Sink Connector. */ +export interface ConnectorConfigS3SinkSpec { + $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3SinkSpec"; + /** List of Kafka topics, separated by ','. */ + topics: string; /** - * Security protocol, which - * we should use to connect - * to cluster. + * The compression type used for files put on GCS. + * The supported values are: `gzip`, `snappy`, `zstd`, `none`. + * Optional, the default is `none`. */ - securityProtocol: string; + fileCompressionType: string; + /** Max records per file. */ + fileMaxRecords?: number; + /** Credentials for connecting to S3 storage. */ + s3Connection?: S3ConnectionSpec; +} + +/** Specification for update Kafka S3-Sink Connector. */ +export interface UpdateConnectorConfigS3SinkSpec { + $type: "yandex.cloud.mdb.kafka.v1.UpdateConnectorConfigS3SinkSpec"; + /** List of Kafka topics, separated by ','. */ + topics: string; + /** Max records per file. */ + fileMaxRecords?: number; + /** Credentials for connecting to S3 storage. */ + s3Connection?: S3ConnectionSpec; +} + +/** + * Specification for S3Connection - + * settings of connection to AWS-compatible S3 storage, that + * are source or target of Kafka S3-connectors. + * YC Object Storage is AWS-compatible. + */ +export interface S3ConnectionSpec { + $type: "yandex.cloud.mdb.kafka.v1.S3ConnectionSpec"; + bucketName: string; + externalS3?: ExternalS3StorageSpec | undefined; +} + +export interface ExternalS3StorageSpec { + $type: "yandex.cloud.mdb.kafka.v1.ExternalS3StorageSpec"; + accessKeyId: string; + secretAccessKey: string; + endpoint: string; + /** Default is 'us-east-1'. */ + region: string; } -/** An Apache Kafka® connector resource. */ export interface Connector { $type: "yandex.cloud.mdb.kafka.v1.Connector"; /** Name of the connector. */ name: string; - /** Maximum number of tasks. Default is the number of brokers */ + /** Maximum number of connector tasks. Default value is the number of brokers. */ tasksMax?: number; /** - * Properties passed with connector config to Connect service - * Example: 'sync.topics.config.enabled: true' + * A set of properties passed to Managed Service for Apache Kafka® with the connector configuration. + * Example: `sync.topics.config.enabled: true`. 
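// [Editor's note] Illustrative sketch only; not part of this patch or of the generated sources.
// It shows how the S3-Sink specification messages introduced above could be assembled as plain
// object literals. Field names and $type strings are taken from the interfaces in this file;
// the import path, endpoint, bucket name and credentials are placeholders/assumptions.
import {
  ConnectorConfigS3SinkSpec,
  ExternalS3StorageSpec,
} from "./connector"; // import path assumed

const externalS3: ExternalS3StorageSpec = {
  $type: "yandex.cloud.mdb.kafka.v1.ExternalS3StorageSpec",
  accessKeyId: "<access-key-id>",
  secretAccessKey: "<secret-access-key>",
  endpoint: "storage.yandexcloud.net", // placeholder endpoint
  region: "us-east-1", // the documented default
};

// A complete S3-Sink spec that could later be embedded into ConnectorSpec.connectorConfigS3Sink.
export const exampleS3SinkSpec: ConnectorConfigS3SinkSpec = {
  $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3SinkSpec",
  topics: "events,logs", // comma-separated list of source topics
  fileCompressionType: "gzip", // one of: gzip, snappy, zstd, none
  fileMaxRecords: 1000,
  s3Connection: {
    $type: "yandex.cloud.mdb.kafka.v1.S3ConnectionSpec",
    bucketName: "my-bucket",
    externalS3,
  },
};
// [End editor's note]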
*/ properties: { [key: string]: string }; /** Connector health. */ health: Connector_Health; /** Current status of the connector. */ status: Connector_Status; - /** ID of the Apache Kafka cluster that the connector belongs to. */ + /** ID of the Apache Kafka® cluster that the connector belongs to. */ clusterId: string; + /** Configuration of the MirrorMaker connector. */ connectorConfigMirrormaker?: ConnectorConfigMirrorMaker | undefined; + /** Configuration of S3-Sink connector. */ + connectorConfigS3Sink?: ConnectorConfigS3Sink | undefined; } export enum Connector_Health { - /** HEALTH_UNKNOWN - State of the connector is unknown. */ + /** HEALTH_UNKNOWN - Health of the connector is unknown. */ HEALTH_UNKNOWN = 0, /** ALIVE - Connector is running. */ ALIVE = 1, - /** DEAD - Connector is failed to start. */ + /** DEAD - Connector has failed to start. */ DEAD = 2, UNRECOGNIZED = -1, } @@ -208,9 +221,9 @@ export enum Connector_Status { STATUS_UNKNOWN = 0, /** RUNNING - Connector is running normally. */ RUNNING = 1, - /** ERROR - Connector encountered a problem and cannot operate. */ + /** ERROR - Connector has encountered a problem and cannot operate. */ ERROR = 2, - /** PAUSED - Connector paused. */ + /** PAUSED - Connector is paused. */ PAUSED = 3, UNRECOGNIZED = -1, } @@ -257,93 +270,85 @@ export interface Connector_PropertiesEntry { value: string; } -/** - * An An Apache Kafka® MirrorMaker - * connector resource. - */ export interface ConnectorConfigMirrorMaker { $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigMirrorMaker"; - /** - * Source cluster resource - * settings. - */ + /** Source cluster connection configuration. */ sourceCluster?: ClusterConnection; - /** - * Target cluster resource - * settings. - */ + /** Target cluster connection configuration. */ targetCluster?: ClusterConnection; - /** List of Kafka topics, separated by ',' */ + /** List of Kafka topics, separated by `,`. */ topics: string; /** Replication factor for automatically created topics. */ replicationFactor?: number; } -/** - * Resource ClusterConnection - - * settings of - * connection to clusters, that - * are source or target of MirrorMaker - * clusters. - */ export interface ClusterConnection { $type: "yandex.cloud.mdb.kafka.v1.ClusterConnection"; /** - * Alias of ClusterConnection resource. - * For example: 'source', 'target', ... + * Alias of cluster connection configuration. + * Examples: `source`, `target`. */ alias: string; - /** - * If type is 'this_cluster' - we connect to - * cluster that is handle Kafka Connect Worker, - * on which we try to register connector. - */ + /** Connection configuration of the cluster the connector belongs to. As all credentials are already known, leave this parameter empty. */ thisCluster?: ThisCluster | undefined; - /** - * If type is 'external_cluster' - we connect - * to cluster that is not handle Kafka Connect Worker, - * on which we try to register connector. - */ + /** Configuration of connection to an external cluster with all the necessary credentials. */ externalCluster?: ExternalClusterConnection | undefined; } -/** - * Resource of cluster_connection - * type 'this_cluster'. - */ export interface ThisCluster { $type: "yandex.cloud.mdb.kafka.v1.ThisCluster"; } -/** - * Resource of connection to - * external cluster. It contains - * all settings of connection - * to external cluster. 
- */ export interface ExternalClusterConnection { $type: "yandex.cloud.mdb.kafka.v1.ExternalClusterConnection"; - /** - * List bootstrap servers of cluster, - * separated by ',' - */ + /** List of bootstrap servers of the cluster, separated by `,`. */ bootstrapServers: string; - /** - * Sasl username which - * we use to connect to cluster. - */ + /** SASL username to use for connection to the cluster. */ saslUsername: string; - /** - * Sasl mechanism, which we - * should use to connect to cluster. - */ + /** SASL mechanism to use for connection to the cluster. */ saslMechanism: string; + /** Security protocol to use for connection to the cluster. */ + securityProtocol: string; +} + +/** + * An Apache Kafka® S3-Sink + * connector resource. + */ +export interface ConnectorConfigS3Sink { + $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3Sink"; + /** List of Kafka topics, separated by ','. */ + topics: string; /** - * Security protocol, which - * we should use to connect - * to cluster. + * The compression type used for files put on GCS. + * The supported values are: `gzip`, `snappy`, `zstd`, `none`. + * Optional, the default is `none`. */ - securityProtocol: string; + fileCompressionType: string; + /** Max records per file. */ + fileMaxRecords?: number; + /** Credentials for connecting to S3 storage. */ + s3Connection?: S3Connection; +} + +/** + * Resource for S3Connection - + * settings of connection to AWS-compatible S3 storage, that + * are source or target of Kafka S3-connectors. + * YC Object Storage is AWS-compatible. + */ +export interface S3Connection { + $type: "yandex.cloud.mdb.kafka.v1.S3Connection"; + bucketName: string; + externalS3?: ExternalS3Storage | undefined; +} + +export interface ExternalS3Storage { + $type: "yandex.cloud.mdb.kafka.v1.ExternalS3Storage"; + accessKeyId: string; + endpoint: string; + /** Default is 'us-east-1' */ + region: string; } const baseConnectorSpec: object = { @@ -383,6 +388,12 @@ export const ConnectorSpec = { writer.uint32(82).fork() ).ldelim(); } + if (message.connectorConfigS3Sink !== undefined) { + ConnectorConfigS3SinkSpec.encode( + message.connectorConfigS3Sink, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -413,6 +424,12 @@ export const ConnectorSpec = { message.connectorConfigMirrormaker = ConnectorConfigMirrorMakerSpec.decode(reader, reader.uint32()); break; + case 11: + message.connectorConfigS3Sink = ConnectorConfigS3SinkSpec.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -444,6 +461,11 @@ export const ConnectorSpec = { object.connectorConfigMirrormaker ) : undefined; + message.connectorConfigS3Sink = + object.connectorConfigS3Sink !== undefined && + object.connectorConfigS3Sink !== null + ? ConnectorConfigS3SinkSpec.fromJSON(object.connectorConfigS3Sink) + : undefined; return message; }, @@ -463,6 +485,10 @@ export const ConnectorSpec = { message.connectorConfigMirrormaker ) : undefined); + message.connectorConfigS3Sink !== undefined && + (obj.connectorConfigS3Sink = message.connectorConfigS3Sink + ? ConnectorConfigS3SinkSpec.toJSON(message.connectorConfigS3Sink) + : undefined); return obj; }, @@ -487,6 +513,11 @@ export const ConnectorSpec = { object.connectorConfigMirrormaker ) : undefined; + message.connectorConfigS3Sink = + object.connectorConfigS3Sink !== undefined && + object.connectorConfigS3Sink !== null + ? 
ConnectorConfigS3SinkSpec.fromPartial(object.connectorConfigS3Sink) + : undefined; return message; }, }; @@ -612,6 +643,12 @@ export const UpdateConnectorSpec = { writer.uint32(82).fork() ).ldelim(); } + if (message.connectorConfigS3Sink !== undefined) { + UpdateConnectorConfigS3SinkSpec.encode( + message.connectorConfigS3Sink, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -639,6 +676,10 @@ export const UpdateConnectorSpec = { message.connectorConfigMirrormaker = ConnectorConfigMirrorMakerSpec.decode(reader, reader.uint32()); break; + case 11: + message.connectorConfigS3Sink = + UpdateConnectorConfigS3SinkSpec.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -666,6 +707,11 @@ export const UpdateConnectorSpec = { object.connectorConfigMirrormaker ) : undefined; + message.connectorConfigS3Sink = + object.connectorConfigS3Sink !== undefined && + object.connectorConfigS3Sink !== null + ? UpdateConnectorConfigS3SinkSpec.fromJSON(object.connectorConfigS3Sink) + : undefined; return message; }, @@ -684,6 +730,10 @@ export const UpdateConnectorSpec = { message.connectorConfigMirrormaker ) : undefined); + message.connectorConfigS3Sink !== undefined && + (obj.connectorConfigS3Sink = message.connectorConfigS3Sink + ? UpdateConnectorConfigS3SinkSpec.toJSON(message.connectorConfigS3Sink) + : undefined); return obj; }, @@ -707,6 +757,13 @@ export const UpdateConnectorSpec = { object.connectorConfigMirrormaker ) : undefined; + message.connectorConfigS3Sink = + object.connectorConfigS3Sink !== undefined && + object.connectorConfigS3Sink !== null + ? UpdateConnectorConfigS3SinkSpec.fromPartial( + object.connectorConfigS3Sink + ) + : undefined; return message; }, }; @@ -1110,6 +1167,7 @@ const baseExternalClusterConnectionSpec: object = { saslPassword: "", saslMechanism: "", securityProtocol: "", + sslTruststoreCertificates: "", }; export const ExternalClusterConnectionSpec = { @@ -1134,6 +1192,9 @@ export const ExternalClusterConnectionSpec = { if (message.securityProtocol !== "") { writer.uint32(42).string(message.securityProtocol); } + if (message.sslTruststoreCertificates !== "") { + writer.uint32(50).string(message.sslTruststoreCertificates); + } return writer; }, @@ -1164,6 +1225,9 @@ export const ExternalClusterConnectionSpec = { case 5: message.securityProtocol = reader.string(); break; + case 6: + message.sslTruststoreCertificates = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1196,6 +1260,11 @@ export const ExternalClusterConnectionSpec = { object.securityProtocol !== undefined && object.securityProtocol !== null ? String(object.securityProtocol) : ""; + message.sslTruststoreCertificates = + object.sslTruststoreCertificates !== undefined && + object.sslTruststoreCertificates !== null + ? String(object.sslTruststoreCertificates) + : ""; return message; }, @@ -1211,6 +1280,8 @@ export const ExternalClusterConnectionSpec = { (obj.saslMechanism = message.saslMechanism); message.securityProtocol !== undefined && (obj.securityProtocol = message.securityProtocol); + message.sslTruststoreCertificates !== undefined && + (obj.sslTruststoreCertificates = message.sslTruststoreCertificates); return obj; }, @@ -1225,6 +1296,7 @@ export const ExternalClusterConnectionSpec = { message.saslPassword = object.saslPassword ?? ""; message.saslMechanism = object.saslMechanism ?? ""; message.securityProtocol = object.securityProtocol ?? ""; + message.sslTruststoreCertificates = object.sslTruststoreCertificates ?? 
""; return message; }, }; @@ -1234,93 +1306,69 @@ messageTypeRegistry.set( ExternalClusterConnectionSpec ); -const baseConnector: object = { - $type: "yandex.cloud.mdb.kafka.v1.Connector", - name: "", - health: 0, - status: 0, - clusterId: "", +const baseConnectorConfigS3SinkSpec: object = { + $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3SinkSpec", + topics: "", + fileCompressionType: "", }; -export const Connector = { - $type: "yandex.cloud.mdb.kafka.v1.Connector" as const, +export const ConnectorConfigS3SinkSpec = { + $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3SinkSpec" as const, encode( - message: Connector, + message: ConnectorConfigS3SinkSpec, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); + if (message.topics !== "") { + writer.uint32(10).string(message.topics); } - if (message.tasksMax !== undefined) { - Int64Value.encode( - { $type: "google.protobuf.Int64Value", value: message.tasksMax! }, - writer.uint32(18).fork() - ).ldelim(); + if (message.fileCompressionType !== "") { + writer.uint32(18).string(message.fileCompressionType); } - Object.entries(message.properties).forEach(([key, value]) => { - Connector_PropertiesEntry.encode( - { - $type: "yandex.cloud.mdb.kafka.v1.Connector.PropertiesEntry", - key: key as any, - value, - }, + if (message.fileMaxRecords !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.fileMaxRecords! }, writer.uint32(26).fork() ).ldelim(); - }); - if (message.health !== 0) { - writer.uint32(32).int32(message.health); - } - if (message.status !== 0) { - writer.uint32(40).int32(message.status); - } - if (message.clusterId !== "") { - writer.uint32(50).string(message.clusterId); } - if (message.connectorConfigMirrormaker !== undefined) { - ConnectorConfigMirrorMaker.encode( - message.connectorConfigMirrormaker, - writer.uint32(82).fork() + if (message.s3Connection !== undefined) { + S3ConnectionSpec.encode( + message.s3Connection, + writer.uint32(34).fork() ).ldelim(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Connector { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ConnectorConfigS3SinkSpec { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseConnector } as Connector; - message.properties = {}; + const message = { + ...baseConnectorConfigS3SinkSpec, + } as ConnectorConfigS3SinkSpec; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.name = reader.string(); + message.topics = reader.string(); break; case 2: - message.tasksMax = Int64Value.decode(reader, reader.uint32()).value; + message.fileCompressionType = reader.string(); break; case 3: - const entry3 = Connector_PropertiesEntry.decode( + message.fileMaxRecords = Int64Value.decode( reader, reader.uint32() - ); - if (entry3.value !== undefined) { - message.properties[entry3.key] = entry3.value; - } + ).value; break; case 4: - message.health = reader.int32() as any; - break; - case 5: - message.status = reader.int32() as any; - break; - case 6: - message.clusterId = reader.string(); - break; - case 10: - message.connectorConfigMirrormaker = - ConnectorConfigMirrorMaker.decode(reader, reader.uint32()); + message.s3Connection = S3ConnectionSpec.decode( + reader, + reader.uint32() + ); break; default: reader.skipType(tag & 7); @@ -1330,109 +1378,600 @@ export const Connector = { return message; }, - fromJSON(object: any): Connector { - const message = { ...baseConnector } as Connector; - message.name = - object.name !== undefined && object.name !== null - ? String(object.name) + fromJSON(object: any): ConnectorConfigS3SinkSpec { + const message = { + ...baseConnectorConfigS3SinkSpec, + } as ConnectorConfigS3SinkSpec; + message.topics = + object.topics !== undefined && object.topics !== null + ? String(object.topics) : ""; - message.tasksMax = - object.tasksMax !== undefined && object.tasksMax !== null - ? Number(object.tasksMax) - : undefined; - message.properties = Object.entries(object.properties ?? {}).reduce<{ - [key: string]: string; - }>((acc, [key, value]) => { - acc[key] = String(value); - return acc; - }, {}); - message.health = - object.health !== undefined && object.health !== null - ? connector_HealthFromJSON(object.health) - : 0; - message.status = - object.status !== undefined && object.status !== null - ? connector_StatusFromJSON(object.status) - : 0; - message.clusterId = - object.clusterId !== undefined && object.clusterId !== null - ? String(object.clusterId) + message.fileCompressionType = + object.fileCompressionType !== undefined && + object.fileCompressionType !== null + ? String(object.fileCompressionType) : ""; - message.connectorConfigMirrormaker = - object.connectorConfigMirrormaker !== undefined && - object.connectorConfigMirrormaker !== null - ? ConnectorConfigMirrorMaker.fromJSON(object.connectorConfigMirrormaker) + message.fileMaxRecords = + object.fileMaxRecords !== undefined && object.fileMaxRecords !== null + ? Number(object.fileMaxRecords) + : undefined; + message.s3Connection = + object.s3Connection !== undefined && object.s3Connection !== null + ? 
S3ConnectionSpec.fromJSON(object.s3Connection) : undefined; return message; }, - toJSON(message: Connector): unknown { + toJSON(message: ConnectorConfigS3SinkSpec): unknown { const obj: any = {}; - message.name !== undefined && (obj.name = message.name); - message.tasksMax !== undefined && (obj.tasksMax = message.tasksMax); - obj.properties = {}; - if (message.properties) { - Object.entries(message.properties).forEach(([k, v]) => { - obj.properties[k] = v; - }); - } - message.health !== undefined && - (obj.health = connector_HealthToJSON(message.health)); - message.status !== undefined && - (obj.status = connector_StatusToJSON(message.status)); - message.clusterId !== undefined && (obj.clusterId = message.clusterId); - message.connectorConfigMirrormaker !== undefined && - (obj.connectorConfigMirrormaker = message.connectorConfigMirrormaker - ? ConnectorConfigMirrorMaker.toJSON(message.connectorConfigMirrormaker) + message.topics !== undefined && (obj.topics = message.topics); + message.fileCompressionType !== undefined && + (obj.fileCompressionType = message.fileCompressionType); + message.fileMaxRecords !== undefined && + (obj.fileMaxRecords = message.fileMaxRecords); + message.s3Connection !== undefined && + (obj.s3Connection = message.s3Connection + ? S3ConnectionSpec.toJSON(message.s3Connection) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Connector { - const message = { ...baseConnector } as Connector; - message.name = object.name ?? ""; - message.tasksMax = object.tasksMax ?? undefined; - message.properties = Object.entries(object.properties ?? {}).reduce<{ - [key: string]: string; - }>((acc, [key, value]) => { - if (value !== undefined) { - acc[key] = String(value); - } - return acc; - }, {}); - message.health = object.health ?? 0; - message.status = object.status ?? 0; - message.clusterId = object.clusterId ?? ""; - message.connectorConfigMirrormaker = - object.connectorConfigMirrormaker !== undefined && - object.connectorConfigMirrormaker !== null - ? ConnectorConfigMirrorMaker.fromPartial( - object.connectorConfigMirrormaker - ) + ): ConnectorConfigS3SinkSpec { + const message = { + ...baseConnectorConfigS3SinkSpec, + } as ConnectorConfigS3SinkSpec; + message.topics = object.topics ?? ""; + message.fileCompressionType = object.fileCompressionType ?? ""; + message.fileMaxRecords = object.fileMaxRecords ?? undefined; + message.s3Connection = + object.s3Connection !== undefined && object.s3Connection !== null + ? 
S3ConnectionSpec.fromPartial(object.s3Connection) : undefined; return message; }, }; -messageTypeRegistry.set(Connector.$type, Connector); +messageTypeRegistry.set( + ConnectorConfigS3SinkSpec.$type, + ConnectorConfigS3SinkSpec +); -const baseConnector_PropertiesEntry: object = { - $type: "yandex.cloud.mdb.kafka.v1.Connector.PropertiesEntry", - key: "", - value: "", +const baseUpdateConnectorConfigS3SinkSpec: object = { + $type: "yandex.cloud.mdb.kafka.v1.UpdateConnectorConfigS3SinkSpec", + topics: "", }; -export const Connector_PropertiesEntry = { - $type: "yandex.cloud.mdb.kafka.v1.Connector.PropertiesEntry" as const, +export const UpdateConnectorConfigS3SinkSpec = { + $type: "yandex.cloud.mdb.kafka.v1.UpdateConnectorConfigS3SinkSpec" as const, encode( - message: Connector_PropertiesEntry, + message: UpdateConnectorConfigS3SinkSpec, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); + if (message.topics !== "") { + writer.uint32(10).string(message.topics); + } + if (message.fileMaxRecords !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.fileMaxRecords! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.s3Connection !== undefined) { + S3ConnectionSpec.encode( + message.s3Connection, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateConnectorConfigS3SinkSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateConnectorConfigS3SinkSpec, + } as UpdateConnectorConfigS3SinkSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.topics = reader.string(); + break; + case 2: + message.fileMaxRecords = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.s3Connection = S3ConnectionSpec.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateConnectorConfigS3SinkSpec { + const message = { + ...baseUpdateConnectorConfigS3SinkSpec, + } as UpdateConnectorConfigS3SinkSpec; + message.topics = + object.topics !== undefined && object.topics !== null + ? String(object.topics) + : ""; + message.fileMaxRecords = + object.fileMaxRecords !== undefined && object.fileMaxRecords !== null + ? Number(object.fileMaxRecords) + : undefined; + message.s3Connection = + object.s3Connection !== undefined && object.s3Connection !== null + ? S3ConnectionSpec.fromJSON(object.s3Connection) + : undefined; + return message; + }, + + toJSON(message: UpdateConnectorConfigS3SinkSpec): unknown { + const obj: any = {}; + message.topics !== undefined && (obj.topics = message.topics); + message.fileMaxRecords !== undefined && + (obj.fileMaxRecords = message.fileMaxRecords); + message.s3Connection !== undefined && + (obj.s3Connection = message.s3Connection + ? S3ConnectionSpec.toJSON(message.s3Connection) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateConnectorConfigS3SinkSpec { + const message = { + ...baseUpdateConnectorConfigS3SinkSpec, + } as UpdateConnectorConfigS3SinkSpec; + message.topics = object.topics ?? ""; + message.fileMaxRecords = object.fileMaxRecords ?? 
undefined; + message.s3Connection = + object.s3Connection !== undefined && object.s3Connection !== null + ? S3ConnectionSpec.fromPartial(object.s3Connection) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateConnectorConfigS3SinkSpec.$type, + UpdateConnectorConfigS3SinkSpec +); + +const baseS3ConnectionSpec: object = { + $type: "yandex.cloud.mdb.kafka.v1.S3ConnectionSpec", + bucketName: "", +}; + +export const S3ConnectionSpec = { + $type: "yandex.cloud.mdb.kafka.v1.S3ConnectionSpec" as const, + + encode( + message: S3ConnectionSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketName !== "") { + writer.uint32(10).string(message.bucketName); + } + if (message.externalS3 !== undefined) { + ExternalS3StorageSpec.encode( + message.externalS3, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): S3ConnectionSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseS3ConnectionSpec } as S3ConnectionSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketName = reader.string(); + break; + case 2: + message.externalS3 = ExternalS3StorageSpec.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): S3ConnectionSpec { + const message = { ...baseS3ConnectionSpec } as S3ConnectionSpec; + message.bucketName = + object.bucketName !== undefined && object.bucketName !== null + ? String(object.bucketName) + : ""; + message.externalS3 = + object.externalS3 !== undefined && object.externalS3 !== null + ? ExternalS3StorageSpec.fromJSON(object.externalS3) + : undefined; + return message; + }, + + toJSON(message: S3ConnectionSpec): unknown { + const obj: any = {}; + message.bucketName !== undefined && (obj.bucketName = message.bucketName); + message.externalS3 !== undefined && + (obj.externalS3 = message.externalS3 + ? ExternalS3StorageSpec.toJSON(message.externalS3) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): S3ConnectionSpec { + const message = { ...baseS3ConnectionSpec } as S3ConnectionSpec; + message.bucketName = object.bucketName ?? ""; + message.externalS3 = + object.externalS3 !== undefined && object.externalS3 !== null + ? 
ExternalS3StorageSpec.fromPartial(object.externalS3) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(S3ConnectionSpec.$type, S3ConnectionSpec); + +const baseExternalS3StorageSpec: object = { + $type: "yandex.cloud.mdb.kafka.v1.ExternalS3StorageSpec", + accessKeyId: "", + secretAccessKey: "", + endpoint: "", + region: "", +}; + +export const ExternalS3StorageSpec = { + $type: "yandex.cloud.mdb.kafka.v1.ExternalS3StorageSpec" as const, + + encode( + message: ExternalS3StorageSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.accessKeyId !== "") { + writer.uint32(10).string(message.accessKeyId); + } + if (message.secretAccessKey !== "") { + writer.uint32(18).string(message.secretAccessKey); + } + if (message.endpoint !== "") { + writer.uint32(26).string(message.endpoint); + } + if (message.region !== "") { + writer.uint32(34).string(message.region); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExternalS3StorageSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExternalS3StorageSpec } as ExternalS3StorageSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.accessKeyId = reader.string(); + break; + case 2: + message.secretAccessKey = reader.string(); + break; + case 3: + message.endpoint = reader.string(); + break; + case 4: + message.region = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExternalS3StorageSpec { + const message = { ...baseExternalS3StorageSpec } as ExternalS3StorageSpec; + message.accessKeyId = + object.accessKeyId !== undefined && object.accessKeyId !== null + ? String(object.accessKeyId) + : ""; + message.secretAccessKey = + object.secretAccessKey !== undefined && object.secretAccessKey !== null + ? String(object.secretAccessKey) + : ""; + message.endpoint = + object.endpoint !== undefined && object.endpoint !== null + ? String(object.endpoint) + : ""; + message.region = + object.region !== undefined && object.region !== null + ? String(object.region) + : ""; + return message; + }, + + toJSON(message: ExternalS3StorageSpec): unknown { + const obj: any = {}; + message.accessKeyId !== undefined && + (obj.accessKeyId = message.accessKeyId); + message.secretAccessKey !== undefined && + (obj.secretAccessKey = message.secretAccessKey); + message.endpoint !== undefined && (obj.endpoint = message.endpoint); + message.region !== undefined && (obj.region = message.region); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExternalS3StorageSpec { + const message = { ...baseExternalS3StorageSpec } as ExternalS3StorageSpec; + message.accessKeyId = object.accessKeyId ?? ""; + message.secretAccessKey = object.secretAccessKey ?? ""; + message.endpoint = object.endpoint ?? ""; + message.region = object.region ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ExternalS3StorageSpec.$type, ExternalS3StorageSpec); + +const baseConnector: object = { + $type: "yandex.cloud.mdb.kafka.v1.Connector", + name: "", + health: 0, + status: 0, + clusterId: "", +}; + +export const Connector = { + $type: "yandex.cloud.mdb.kafka.v1.Connector" as const, + + encode( + message: Connector, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.tasksMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tasksMax! }, + writer.uint32(18).fork() + ).ldelim(); + } + Object.entries(message.properties).forEach(([key, value]) => { + Connector_PropertiesEntry.encode( + { + $type: "yandex.cloud.mdb.kafka.v1.Connector.PropertiesEntry", + key: key as any, + value, + }, + writer.uint32(26).fork() + ).ldelim(); + }); + if (message.health !== 0) { + writer.uint32(32).int32(message.health); + } + if (message.status !== 0) { + writer.uint32(40).int32(message.status); + } + if (message.clusterId !== "") { + writer.uint32(50).string(message.clusterId); + } + if (message.connectorConfigMirrormaker !== undefined) { + ConnectorConfigMirrorMaker.encode( + message.connectorConfigMirrormaker, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.connectorConfigS3Sink !== undefined) { + ConnectorConfigS3Sink.encode( + message.connectorConfigS3Sink, + writer.uint32(90).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Connector { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseConnector } as Connector; + message.properties = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.tasksMax = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + const entry3 = Connector_PropertiesEntry.decode( + reader, + reader.uint32() + ); + if (entry3.value !== undefined) { + message.properties[entry3.key] = entry3.value; + } + break; + case 4: + message.health = reader.int32() as any; + break; + case 5: + message.status = reader.int32() as any; + break; + case 6: + message.clusterId = reader.string(); + break; + case 10: + message.connectorConfigMirrormaker = + ConnectorConfigMirrorMaker.decode(reader, reader.uint32()); + break; + case 11: + message.connectorConfigS3Sink = ConnectorConfigS3Sink.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Connector { + const message = { ...baseConnector } as Connector; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.tasksMax = + object.tasksMax !== undefined && object.tasksMax !== null + ? Number(object.tasksMax) + : undefined; + message.properties = Object.entries(object.properties ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.health = + object.health !== undefined && object.health !== null + ? connector_HealthFromJSON(object.health) + : 0; + message.status = + object.status !== undefined && object.status !== null + ? 
connector_StatusFromJSON(object.status) + : 0; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.connectorConfigMirrormaker = + object.connectorConfigMirrormaker !== undefined && + object.connectorConfigMirrormaker !== null + ? ConnectorConfigMirrorMaker.fromJSON(object.connectorConfigMirrormaker) + : undefined; + message.connectorConfigS3Sink = + object.connectorConfigS3Sink !== undefined && + object.connectorConfigS3Sink !== null + ? ConnectorConfigS3Sink.fromJSON(object.connectorConfigS3Sink) + : undefined; + return message; + }, + + toJSON(message: Connector): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.tasksMax !== undefined && (obj.tasksMax = message.tasksMax); + obj.properties = {}; + if (message.properties) { + Object.entries(message.properties).forEach(([k, v]) => { + obj.properties[k] = v; + }); + } + message.health !== undefined && + (obj.health = connector_HealthToJSON(message.health)); + message.status !== undefined && + (obj.status = connector_StatusToJSON(message.status)); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.connectorConfigMirrormaker !== undefined && + (obj.connectorConfigMirrormaker = message.connectorConfigMirrormaker + ? ConnectorConfigMirrorMaker.toJSON(message.connectorConfigMirrormaker) + : undefined); + message.connectorConfigS3Sink !== undefined && + (obj.connectorConfigS3Sink = message.connectorConfigS3Sink + ? ConnectorConfigS3Sink.toJSON(message.connectorConfigS3Sink) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Connector { + const message = { ...baseConnector } as Connector; + message.name = object.name ?? ""; + message.tasksMax = object.tasksMax ?? undefined; + message.properties = Object.entries(object.properties ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.health = object.health ?? 0; + message.status = object.status ?? 0; + message.clusterId = object.clusterId ?? ""; + message.connectorConfigMirrormaker = + object.connectorConfigMirrormaker !== undefined && + object.connectorConfigMirrormaker !== null + ? ConnectorConfigMirrorMaker.fromPartial( + object.connectorConfigMirrormaker + ) + : undefined; + message.connectorConfigS3Sink = + object.connectorConfigS3Sink !== undefined && + object.connectorConfigS3Sink !== null + ? 
ConnectorConfigS3Sink.fromPartial(object.connectorConfigS3Sink) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Connector.$type, Connector); + +const baseConnector_PropertiesEntry: object = { + $type: "yandex.cloud.mdb.kafka.v1.Connector.PropertiesEntry", + key: "", + value: "", +}; + +export const Connector_PropertiesEntry = { + $type: "yandex.cloud.mdb.kafka.v1.Connector.PropertiesEntry" as const, + + encode( + message: Connector_PropertiesEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); } if (message.value !== "") { writer.uint32(18).string(message.value); @@ -1916,6 +2455,301 @@ messageTypeRegistry.set( ExternalClusterConnection ); +const baseConnectorConfigS3Sink: object = { + $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3Sink", + topics: "", + fileCompressionType: "", +}; + +export const ConnectorConfigS3Sink = { + $type: "yandex.cloud.mdb.kafka.v1.ConnectorConfigS3Sink" as const, + + encode( + message: ConnectorConfigS3Sink, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.topics !== "") { + writer.uint32(10).string(message.topics); + } + if (message.fileCompressionType !== "") { + writer.uint32(18).string(message.fileCompressionType); + } + if (message.fileMaxRecords !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.fileMaxRecords! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.s3Connection !== undefined) { + S3Connection.encode( + message.s3Connection, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ConnectorConfigS3Sink { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseConnectorConfigS3Sink } as ConnectorConfigS3Sink; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.topics = reader.string(); + break; + case 2: + message.fileCompressionType = reader.string(); + break; + case 3: + message.fileMaxRecords = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.s3Connection = S3Connection.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConnectorConfigS3Sink { + const message = { ...baseConnectorConfigS3Sink } as ConnectorConfigS3Sink; + message.topics = + object.topics !== undefined && object.topics !== null + ? String(object.topics) + : ""; + message.fileCompressionType = + object.fileCompressionType !== undefined && + object.fileCompressionType !== null + ? String(object.fileCompressionType) + : ""; + message.fileMaxRecords = + object.fileMaxRecords !== undefined && object.fileMaxRecords !== null + ? Number(object.fileMaxRecords) + : undefined; + message.s3Connection = + object.s3Connection !== undefined && object.s3Connection !== null + ? 
S3Connection.fromJSON(object.s3Connection) + : undefined; + return message; + }, + + toJSON(message: ConnectorConfigS3Sink): unknown { + const obj: any = {}; + message.topics !== undefined && (obj.topics = message.topics); + message.fileCompressionType !== undefined && + (obj.fileCompressionType = message.fileCompressionType); + message.fileMaxRecords !== undefined && + (obj.fileMaxRecords = message.fileMaxRecords); + message.s3Connection !== undefined && + (obj.s3Connection = message.s3Connection + ? S3Connection.toJSON(message.s3Connection) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ConnectorConfigS3Sink { + const message = { ...baseConnectorConfigS3Sink } as ConnectorConfigS3Sink; + message.topics = object.topics ?? ""; + message.fileCompressionType = object.fileCompressionType ?? ""; + message.fileMaxRecords = object.fileMaxRecords ?? undefined; + message.s3Connection = + object.s3Connection !== undefined && object.s3Connection !== null + ? S3Connection.fromPartial(object.s3Connection) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ConnectorConfigS3Sink.$type, ConnectorConfigS3Sink); + +const baseS3Connection: object = { + $type: "yandex.cloud.mdb.kafka.v1.S3Connection", + bucketName: "", +}; + +export const S3Connection = { + $type: "yandex.cloud.mdb.kafka.v1.S3Connection" as const, + + encode( + message: S3Connection, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketName !== "") { + writer.uint32(10).string(message.bucketName); + } + if (message.externalS3 !== undefined) { + ExternalS3Storage.encode( + message.externalS3, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): S3Connection { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseS3Connection } as S3Connection; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketName = reader.string(); + break; + case 2: + message.externalS3 = ExternalS3Storage.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): S3Connection { + const message = { ...baseS3Connection } as S3Connection; + message.bucketName = + object.bucketName !== undefined && object.bucketName !== null + ? String(object.bucketName) + : ""; + message.externalS3 = + object.externalS3 !== undefined && object.externalS3 !== null + ? ExternalS3Storage.fromJSON(object.externalS3) + : undefined; + return message; + }, + + toJSON(message: S3Connection): unknown { + const obj: any = {}; + message.bucketName !== undefined && (obj.bucketName = message.bucketName); + message.externalS3 !== undefined && + (obj.externalS3 = message.externalS3 + ? ExternalS3Storage.toJSON(message.externalS3) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): S3Connection { + const message = { ...baseS3Connection } as S3Connection; + message.bucketName = object.bucketName ?? ""; + message.externalS3 = + object.externalS3 !== undefined && object.externalS3 !== null + ? 
ExternalS3Storage.fromPartial(object.externalS3) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(S3Connection.$type, S3Connection); + +const baseExternalS3Storage: object = { + $type: "yandex.cloud.mdb.kafka.v1.ExternalS3Storage", + accessKeyId: "", + endpoint: "", + region: "", +}; + +export const ExternalS3Storage = { + $type: "yandex.cloud.mdb.kafka.v1.ExternalS3Storage" as const, + + encode( + message: ExternalS3Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.accessKeyId !== "") { + writer.uint32(10).string(message.accessKeyId); + } + if (message.endpoint !== "") { + writer.uint32(18).string(message.endpoint); + } + if (message.region !== "") { + writer.uint32(26).string(message.region); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExternalS3Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExternalS3Storage } as ExternalS3Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.accessKeyId = reader.string(); + break; + case 2: + message.endpoint = reader.string(); + break; + case 3: + message.region = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExternalS3Storage { + const message = { ...baseExternalS3Storage } as ExternalS3Storage; + message.accessKeyId = + object.accessKeyId !== undefined && object.accessKeyId !== null + ? String(object.accessKeyId) + : ""; + message.endpoint = + object.endpoint !== undefined && object.endpoint !== null + ? String(object.endpoint) + : ""; + message.region = + object.region !== undefined && object.region !== null + ? String(object.region) + : ""; + return message; + }, + + toJSON(message: ExternalS3Storage): unknown { + const obj: any = {}; + message.accessKeyId !== undefined && + (obj.accessKeyId = message.accessKeyId); + message.endpoint !== undefined && (obj.endpoint = message.endpoint); + message.region !== undefined && (obj.region = message.region); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExternalS3Storage { + const message = { ...baseExternalS3Storage } as ExternalS3Storage; + message.accessKeyId = object.accessKeyId ?? ""; + message.endpoint = object.endpoint ?? ""; + message.region = object.region ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ExternalS3Storage.$type, ExternalS3Storage); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/connector_service.ts b/src/generated/yandex/cloud/mdb/kafka/v1/connector_service.ts index a6e43349..e9da510f 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/connector_service.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/connector_service.ts @@ -27,13 +27,15 @@ export const protobufPackage = "yandex.cloud.mdb.kafka.v1"; export interface GetConnectorRequest { $type: "yandex.cloud.mdb.kafka.v1.GetConnectorRequest"; /** - * ID of the Apache Kafka Cluster resource to return. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the Apache Kafka® cluster the connector belongs to. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the Apache Kafka Connector resource to return. - * To get the name of the connector use a [ConnectorService.List] request. 
+ * Name of the Apache Kafka® connector to return information about. + * + * To get this name, make a [ConnectorService.List] request. */ connectorName: string; } @@ -41,27 +43,33 @@ export interface GetConnectorRequest { export interface ListConnectorsRequest { $type: "yandex.cloud.mdb.kafka.v1.ListConnectorsRequest"; /** - * ID of the Apache Kafka cluster to list connectors in. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the Apache Kafka® cluster to list connectors in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; + /** + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListConnectorsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ConnectorService.List] requests. + */ pageSize: number; /** - * Page token. To get the next page of results, Set [page_token] to the [ListConnectorsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListConnectorsResponse.next_page_token] returned by the previous [ConnectorService.List] request. */ pageToken: string; } export interface ListConnectorsResponse { $type: "yandex.cloud.mdb.kafka.v1.ListConnectorsResponse"; - /** List of Apache Kafka Connector resources. */ + /** List of Apache Kafka® Connectors. */ connectors: Connector[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListConnectorsRequest.page_size], use the [next_page_token] as the value - * for the [ListConnectorsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListConnectorsRequest.page_size], use the [next_page_token] as the value for the [ListConnectorsRequest.page_token] in the subsequent [ConnectorService.List] request to iterate through multiple pages of results. */ nextPageToken: string; } @@ -69,105 +77,120 @@ export interface ListConnectorsResponse { export interface CreateConnectorRequest { $type: "yandex.cloud.mdb.kafka.v1.CreateConnectorRequest"; /** - * Required. ID of the Apache Kafka cluster to create a connector in. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the Apache Kafka® cluster to create the connector in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Required. Configuration of the connector to create. */ + /** Configuration of the connector to create. */ connectorSpec?: ConnectorSpec; } export interface CreateConnectorMetadata { $type: "yandex.cloud.mdb.kafka.v1.CreateConnectorMetadata"; - /** ID of the Apache Kafka cluster where a connector is being created. */ + /** ID of the Apache Kafka® cluster the connector is being created in. */ clusterId: string; - /** Name of the Apache Kafka connector that is being created. */ + /** Name of the Apache Kafka® connector that is being created. */ connectorName: string; } export interface UpdateConnectorRequest { $type: "yandex.cloud.mdb.kafka.v1.UpdateConnectorRequest"; /** - * Required. ID of the Apache Kafka cluster to update a connector in. 
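// [Editor's note] Illustrative sketch only; not part of this patch. It demonstrates the
// [page_token]/[next_page_token] contract documented above for ConnectorService.List: pass the
// token returned by the previous call until an empty token comes back. The `client` argument is
// assumed to be an already constructed ConnectorServiceClient; import paths are assumptions.
import { Connector } from "./connector"; // import path assumed
import {
  ConnectorServiceClient,
  ListConnectorsRequest,
  ListConnectorsResponse,
} from "./connector_service"; // import path assumed

export async function listAllConnectors(
  client: ConnectorServiceClient,
  clusterId: string
): Promise<Connector[]> {
  const connectors: Connector[] = [];
  let pageToken = "";
  do {
    const request: ListConnectorsRequest = {
      $type: "yandex.cloud.mdb.kafka.v1.ListConnectorsRequest",
      clusterId,
      pageSize: 100,
      pageToken,
    };
    // Wrap the callback-style stub call into a promise.
    const response = await new Promise<ListConnectorsResponse>((resolve, reject) =>
      client.list(request, (error, result) => (error ? reject(error) : resolve(result)))
    );
    connectors.push(...response.connectors);
    pageToken = response.nextPageToken;
  } while (pageToken !== "");
  return connectors;
}
// [End editor's note]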
- * To get the cluster ID use a [ClusterService.List] request. + * ID of the Apache Kafka® cluster to update the connector in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Required. Name of the connector to update. - * To get the name of the connector, use a [ConnectorService.List] request. + * Name of the connector to update. + * + * To get this name, make a [ConnectorService.List] request. */ connectorName: string; - /** Field mask that specifies which fields of the Connector resource should be updated. */ + /** Field mask that specifies which settings of the connector should be updated. */ updateMask?: FieldMask; - /** Required. Configuration of the connector to update. */ + /** Configuration of the connector to update. */ connectorSpec?: UpdateConnectorSpec; } export interface UpdateConnectorMetadata { $type: "yandex.cloud.mdb.kafka.v1.UpdateConnectorMetadata"; - /** ID of the Apache Kafka cluster where a connector is being updated. */ + /** ID of the Apache Kafka® cluster the connector is being updated in. */ clusterId: string; - /** Name of the Apache Kafka connector that is being updated. */ + /** Name of the Apache Kafka® connector that is being updated. */ connectorName: string; } export interface DeleteConnectorRequest { $type: "yandex.cloud.mdb.kafka.v1.DeleteConnectorRequest"; /** - * Required. ID of the Apache Kafka cluster to delete a connector in. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the Apache Kafka® cluster to delete the connector from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Required. Name of the connector to delete. - * To get the name of the connector, use a [ConnectorService.List] request. + * Name of the connector to delete. + * + * To get this name, make a [ConnectorService.List] request. */ connectorName: string; } export interface DeleteConnectorMetadata { $type: "yandex.cloud.mdb.kafka.v1.DeleteConnectorMetadata"; - /** ID of the Apache Kafka cluster where a connector is being deleted. */ + /** ID of the Apache Kafka® cluster the connector is being deleted from. */ clusterId: string; - /** Name of the Apache Kafka connector that is being deleted. */ + /** Name of the Apache Kafka® connector that is being deleted. */ connectorName: string; } export interface ResumeConnectorRequest { $type: "yandex.cloud.mdb.kafka.v1.ResumeConnectorRequest"; - /** Required. ID of the Apache Kafka cluster to resume connector in. */ + /** + * ID of the Apache Kafka® cluster to resume the connector in. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; /** - * Name of the Apache Kafka Connector resource to resume. - * To get the name of the connector use a [ConnectorService.List] request. + * Name of the Apache Kafka® connector to resume. + * + * To get this name, make a [ConnectorService.List] request. */ connectorName: string; } export interface ResumeConnectorMetadata { $type: "yandex.cloud.mdb.kafka.v1.ResumeConnectorMetadata"; - /** Required. ID of the Apache Kafka cluster. */ + /** ID of the Apache Kafka® cluster the connector is being resumed in. */ clusterId: string; - /** Name of the Apache Kafka Connector resource that is beign resumed. */ + /** Name of the Apache Kafka® connector that is being resumed. */ connectorName: string; } export interface PauseConnectorRequest { $type: "yandex.cloud.mdb.kafka.v1.PauseConnectorRequest"; - /** Required. ID of the Apache Kafka cluster to pause connector in.
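// [Editor's note] Illustrative sketch only; not part of this patch. It submits a
// CreateConnectorRequest (defined earlier in this file) that carries the new S3-Sink
// configuration through the callback-style stub; create() resolves to an Operation that the
// server completes asynchronously. Client construction, operation polling and the import paths
// are assumptions and out of scope here.
import { ConnectorSpec } from "./connector"; // import path assumed
import {
  ConnectorServiceClient,
  CreateConnectorRequest,
} from "./connector_service"; // import path assumed
import { Operation } from "../../../operation/operation"; // import path assumed

export function createConnector(
  client: ConnectorServiceClient,
  clusterId: string,
  connectorSpec: ConnectorSpec // e.g. a spec whose connectorConfigS3Sink field is set
): Promise<Operation> {
  const request: CreateConnectorRequest = {
    $type: "yandex.cloud.mdb.kafka.v1.CreateConnectorRequest",
    clusterId,
    connectorSpec,
  };
  return new Promise<Operation>((resolve, reject) =>
    client.create(request, (error, operation) =>
      error ? reject(error) : resolve(operation)
    )
  );
}
// [End editor's note]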
*/ + /** + * ID of the Apache Kafka® cluster to pause the connector in. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; /** - * Name of the Apache Kafka Connector resource to pause. - * To get the name of the connector use a [ConnectorService.List] request. + * Name of the Apache Kafka® connector to pause. + * + * To get this name, make a [ConnectorService.List] request. */ connectorName: string; } export interface PauseConnectorMetadata { $type: "yandex.cloud.mdb.kafka.v1.PauseConnectorMetadata"; - /** Required. ID of the Apache Kafka cluster. */ + /** ID of the Apache Kafka® cluster the connector is being paused in. */ clusterId: string; - /** Name of the Apache Kafka Connector resource that is being paused. */ + /** Name of the Apache Kafka® connector that is being paused. */ connectorName: string; } @@ -1287,13 +1310,9 @@ export const PauseConnectorMetadata = { messageTypeRegistry.set(PauseConnectorMetadata.$type, PauseConnectorMetadata); -/** A set of methods for managing Apache Kafka Connectors resources. */ +/** A set of methods for managing Apache Kafka® connectors. */ export const ConnectorServiceService = { - /** - * Returns the specified Apache Kafka Connector resource. - * - * To get the list of available Apache Kafka Connector resources, make a [List] request. - */ + /** Returns information about an Apache Kafka® connector. */ get: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/Get", requestStream: false, @@ -1305,7 +1324,7 @@ export const ConnectorServiceService = { Buffer.from(Connector.encode(value).finish()), responseDeserialize: (value: Buffer) => Connector.decode(value), }, - /** Retrieves the list of Apache Kafka Connector resources in the specified cluster. */ + /** Retrieves the list of Apache Kafka® connectors in a cluster. */ list: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/List", requestStream: false, @@ -1318,7 +1337,7 @@ export const ConnectorServiceService = { responseDeserialize: (value: Buffer) => ListConnectorsResponse.decode(value), }, - /** Creates a new Apache Kafka connector in the specified cluster. */ + /** Creates a new Apache Kafka® connector in a cluster. */ create: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/Create", requestStream: false, @@ -1330,7 +1349,7 @@ export const ConnectorServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates an Apache Kafka connector in the specified cluster. */ + /** Updates an Apache Kafka® connector. */ update: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/Update", requestStream: false, @@ -1342,7 +1361,7 @@ export const ConnectorServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified Apache Kafka connector. */ + /** Deletes an Apache Kafka® connector. */ delete: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/Delete", requestStream: false, @@ -1354,7 +1373,7 @@ export const ConnectorServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Resume the specified Apache Kafka connector. */ + /** Resumes an Apache Kafka® connector. 
*/ resume: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/Resume", requestStream: false, @@ -1366,7 +1385,7 @@ export const ConnectorServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Pause the specified Apache Kafka connector. */ + /** Pauses an Apache Kafka® connector. */ pause: { path: "/yandex.cloud.mdb.kafka.v1.ConnectorService/Pause", requestStream: false, @@ -1381,32 +1400,24 @@ export const ConnectorServiceService = { } as const; export interface ConnectorServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified Apache Kafka Connector resource. - * - * To get the list of available Apache Kafka Connector resources, make a [List] request. - */ + /** Returns information about an Apache Kafka® connector. */ get: handleUnaryCall; - /** Retrieves the list of Apache Kafka Connector resources in the specified cluster. */ + /** Retrieves the list of Apache Kafka® connectors in a cluster. */ list: handleUnaryCall; - /** Creates a new Apache Kafka connector in the specified cluster. */ + /** Creates a new Apache Kafka® connector in a cluster. */ create: handleUnaryCall; - /** Updates an Apache Kafka connector in the specified cluster. */ + /** Updates an Apache Kafka® connector. */ update: handleUnaryCall; - /** Deletes the specified Apache Kafka connector. */ + /** Deletes an Apache Kafka® connector. */ delete: handleUnaryCall; - /** Resume the specified Apache Kafka connector. */ + /** Resumes an Apache Kafka® connector. */ resume: handleUnaryCall; - /** Pause the specified Apache Kafka connector. */ + /** Pauses an Apache Kafka® connector. */ pause: handleUnaryCall; } export interface ConnectorServiceClient extends Client { - /** - * Returns the specified Apache Kafka Connector resource. - * - * To get the list of available Apache Kafka Connector resources, make a [List] request. - */ + /** Returns information about an Apache Kafka® connector. */ get( request: GetConnectorRequest, callback: (error: ServiceError | null, response: Connector) => void @@ -1422,7 +1433,7 @@ export interface ConnectorServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Connector) => void ): ClientUnaryCall; - /** Retrieves the list of Apache Kafka Connector resources in the specified cluster. */ + /** Retrieves the list of Apache Kafka® connectors in a cluster. */ list( request: ListConnectorsRequest, callback: ( @@ -1447,7 +1458,7 @@ export interface ConnectorServiceClient extends Client { response: ListConnectorsResponse ) => void ): ClientUnaryCall; - /** Creates a new Apache Kafka connector in the specified cluster. */ + /** Creates a new Apache Kafka® connector in a cluster. */ create( request: CreateConnectorRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1463,7 +1474,7 @@ export interface ConnectorServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates an Apache Kafka connector in the specified cluster. */ + /** Updates an Apache Kafka® connector. */ update( request: UpdateConnectorRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1479,7 +1490,7 @@ export interface ConnectorServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified Apache Kafka connector. 
*/ + /** Deletes an Apache Kafka® connector. */ delete( request: DeleteConnectorRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1495,7 +1506,7 @@ export interface ConnectorServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Resume the specified Apache Kafka connector. */ + /** Resumes an Apache Kafka® connector. */ resume( request: ResumeConnectorRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1511,7 +1522,7 @@ export interface ConnectorServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Pause the specified Apache Kafka connector. */ + /** Pauses an Apache Kafka® connector. */ pause( request: PauseConnectorRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/kafka/v1/resource_preset_service.ts index ae501111..d98ab143 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/resource_preset_service.ts @@ -39,7 +39,7 @@ export interface ListResourcePresetsRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token], returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token], returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts b/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts index 79f3a31f..3c9e29b6 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/topic.ts @@ -32,6 +32,7 @@ export interface Topic { topicConfig21?: Topicconfig21 | undefined; topicConfig26?: Topicconfig26 | undefined; topicConfig28?: Topicconfig28 | undefined; + topicConfig3?: TopicConfig3 | undefined; } export interface TopicSpec { @@ -45,6 +46,7 @@ export interface TopicSpec { topicConfig21?: Topicconfig21 | undefined; topicConfig26?: Topicconfig26 | undefined; topicConfig28?: Topicconfig28 | undefined; + topicConfig3?: TopicConfig3 | undefined; } /** A topic settings for 2.1. */ @@ -380,6 +382,117 @@ export function topicconfig28_CleanupPolicyToJSON( } } +/** A topic settings for 3.x */ +export interface TopicConfig3 { + $type: "yandex.cloud.mdb.kafka.v1.TopicConfig3"; + /** Retention policy to use on old log messages. */ + cleanupPolicy: TopicConfig3_CleanupPolicy; + /** The compression type for a given topic. */ + compressionType: CompressionType; + /** The amount of time in milliseconds to retain delete tombstone markers for log compacted topics. */ + deleteRetentionMs?: number; + /** The time to wait before deleting a file from the filesystem. */ + fileDeleteDelayMs?: number; + /** + * The number of messages accumulated on a log partition before messages are flushed to disk. + * + * This setting overrides the cluster-level [KafkaConfig3.log_flush_interval_messages] setting on the topic level. + */ + flushMessages?: number; + /** + * The maximum time in milliseconds that a message in the topic is kept in memory before flushed to disk. + * + * This setting overrides the cluster-level [KafkaConfig3.log_flush_interval_ms] setting on the topic level. 
+ */ + flushMs?: number; + /** The minimum time in milliseconds a message will remain uncompacted in the log. */ + minCompactionLagMs?: number; + /** + * The maximum size a partition can grow to before Kafka will discard old log segments to free up space if the `delete` [cleanup_policy] is in effect. + * It is helpful if you need to control the size of the log due to limited disk space. + * + * This setting overrides the cluster-level [KafkaConfig3.log_retention_bytes] setting on the topic level. + */ + retentionBytes?: number; + /** + * The number of milliseconds to keep a log segment's file before deleting it. + * + * This setting overrides the cluster-level [KafkaConfig3.log_retention_ms] setting on the topic level. + */ + retentionMs?: number; + /** The largest record batch size allowed in the topic. */ + maxMessageBytes?: number; + /** + * This configuration specifies the minimum number of replicas that must acknowledge a write to the topic for the write + * to be considered successful (when a producer sets acks to "all"). + */ + minInsyncReplicas?: number; + /** + * This configuration controls the segment file size for the log. Retention and cleaning is always done a file + * at a time so a larger segment size means fewer files but less granular control over retention. + * + * This setting overrides the cluster-level [KafkaConfig3.log_segment_bytes] setting on the topic level. + */ + segmentBytes?: number; + /** + * True if we should preallocate the file on disk when creating a new log segment. + * + * This setting overrides the cluster-level [KafkaConfig3.log_preallocate] setting on the topic level. + */ + preallocate?: boolean; +} + +export enum TopicConfig3_CleanupPolicy { + CLEANUP_POLICY_UNSPECIFIED = 0, + /** CLEANUP_POLICY_DELETE - this policy discards log segments when either their retention time or log size limit is reached. See also: [KafkaConfig3.log_retention_ms] and other similar parameters. */ + CLEANUP_POLICY_DELETE = 1, + /** CLEANUP_POLICY_COMPACT - this policy compacts messages in the log. */ + CLEANUP_POLICY_COMPACT = 2, + /** CLEANUP_POLICY_COMPACT_AND_DELETE - this policy uses both compaction and deletion for messages and log segments. 
*/ + CLEANUP_POLICY_COMPACT_AND_DELETE = 3, + UNRECOGNIZED = -1, +} + +export function topicConfig3_CleanupPolicyFromJSON( + object: any +): TopicConfig3_CleanupPolicy { + switch (object) { + case 0: + case "CLEANUP_POLICY_UNSPECIFIED": + return TopicConfig3_CleanupPolicy.CLEANUP_POLICY_UNSPECIFIED; + case 1: + case "CLEANUP_POLICY_DELETE": + return TopicConfig3_CleanupPolicy.CLEANUP_POLICY_DELETE; + case 2: + case "CLEANUP_POLICY_COMPACT": + return TopicConfig3_CleanupPolicy.CLEANUP_POLICY_COMPACT; + case 3: + case "CLEANUP_POLICY_COMPACT_AND_DELETE": + return TopicConfig3_CleanupPolicy.CLEANUP_POLICY_COMPACT_AND_DELETE; + case -1: + case "UNRECOGNIZED": + default: + return TopicConfig3_CleanupPolicy.UNRECOGNIZED; + } +} + +export function topicConfig3_CleanupPolicyToJSON( + object: TopicConfig3_CleanupPolicy +): string { + switch (object) { + case TopicConfig3_CleanupPolicy.CLEANUP_POLICY_UNSPECIFIED: + return "CLEANUP_POLICY_UNSPECIFIED"; + case TopicConfig3_CleanupPolicy.CLEANUP_POLICY_DELETE: + return "CLEANUP_POLICY_DELETE"; + case TopicConfig3_CleanupPolicy.CLEANUP_POLICY_COMPACT: + return "CLEANUP_POLICY_COMPACT"; + case TopicConfig3_CleanupPolicy.CLEANUP_POLICY_COMPACT_AND_DELETE: + return "CLEANUP_POLICY_COMPACT_AND_DELETE"; + default: + return "UNKNOWN"; + } +} + const baseTopic: object = { $type: "yandex.cloud.mdb.kafka.v1.Topic", name: "", @@ -429,6 +542,12 @@ export const Topic = { writer.uint32(58).fork() ).ldelim(); } + if (message.topicConfig3 !== undefined) { + TopicConfig3.encode( + message.topicConfig3, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -463,6 +582,9 @@ export const Topic = { case 7: message.topicConfig28 = Topicconfig28.decode(reader, reader.uint32()); break; + case 8: + message.topicConfig3 = TopicConfig3.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -502,6 +624,10 @@ export const Topic = { object.topicConfig_2_8 !== undefined && object.topicConfig_2_8 !== null ? Topicconfig28.fromJSON(object.topicConfig_2_8) : undefined; + message.topicConfig3 = + object.topicConfig_3 !== undefined && object.topicConfig_3 !== null + ? TopicConfig3.fromJSON(object.topicConfig_3) + : undefined; return message; }, @@ -524,6 +650,10 @@ export const Topic = { (obj.topicConfig_2_8 = message.topicConfig28 ? Topicconfig28.toJSON(message.topicConfig28) : undefined); + message.topicConfig3 !== undefined && + (obj.topicConfig_3 = message.topicConfig3 + ? TopicConfig3.toJSON(message.topicConfig3) + : undefined); return obj; }, @@ -545,6 +675,10 @@ export const Topic = { object.topicConfig28 !== undefined && object.topicConfig28 !== null ? Topicconfig28.fromPartial(object.topicConfig28) : undefined; + message.topicConfig3 = + object.topicConfig3 !== undefined && object.topicConfig3 !== null + ? TopicConfig3.fromPartial(object.topicConfig3) + : undefined; return message; }, }; @@ -599,6 +733,12 @@ export const TopicSpec = { writer.uint32(50).fork() ).ldelim(); } + if (message.topicConfig3 !== undefined) { + TopicConfig3.encode( + message.topicConfig3, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -630,6 +770,9 @@ export const TopicSpec = { case 6: message.topicConfig28 = Topicconfig28.decode(reader, reader.uint32()); break; + case 7: + message.topicConfig3 = TopicConfig3.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -665,6 +808,10 @@ export const TopicSpec = { object.topicConfig_2_8 !== undefined && object.topicConfig_2_8 !== null ? 
Topicconfig28.fromJSON(object.topicConfig_2_8) : undefined; + message.topicConfig3 = + object.topicConfig_3 !== undefined && object.topicConfig_3 !== null + ? TopicConfig3.fromJSON(object.topicConfig_3) + : undefined; return message; }, @@ -686,6 +833,10 @@ export const TopicSpec = { (obj.topicConfig_2_8 = message.topicConfig28 ? Topicconfig28.toJSON(message.topicConfig28) : undefined); + message.topicConfig3 !== undefined && + (obj.topicConfig_3 = message.topicConfig3 + ? TopicConfig3.toJSON(message.topicConfig3) + : undefined); return obj; }, @@ -708,6 +859,10 @@ export const TopicSpec = { object.topicConfig28 !== undefined && object.topicConfig28 !== null ? Topicconfig28.fromPartial(object.topicConfig28) : undefined; + message.topicConfig3 = + object.topicConfig3 !== undefined && object.topicConfig3 !== null + ? TopicConfig3.fromPartial(object.topicConfig3) + : undefined; return message; }, }; @@ -1614,6 +1769,306 @@ export const Topicconfig28 = { messageTypeRegistry.set(Topicconfig28.$type, Topicconfig28); +const baseTopicConfig3: object = { + $type: "yandex.cloud.mdb.kafka.v1.TopicConfig3", + cleanupPolicy: 0, + compressionType: 0, +}; + +export const TopicConfig3 = { + $type: "yandex.cloud.mdb.kafka.v1.TopicConfig3" as const, + + encode( + message: TopicConfig3, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cleanupPolicy !== 0) { + writer.uint32(8).int32(message.cleanupPolicy); + } + if (message.compressionType !== 0) { + writer.uint32(16).int32(message.compressionType); + } + if (message.deleteRetentionMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deleteRetentionMs!, + }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.fileDeleteDelayMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fileDeleteDelayMs!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.flushMessages !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.flushMessages! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.flushMs !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.flushMs! }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.minCompactionLagMs !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.minCompactionLagMs!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.retentionBytes !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.retentionBytes! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.retentionMs !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.retentionMs! }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.maxMessageBytes !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxMessageBytes!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.minInsyncReplicas !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.minInsyncReplicas!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.segmentBytes !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.segmentBytes! 
}, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.preallocate !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.preallocate! }, + writer.uint32(106).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TopicConfig3 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTopicConfig3 } as TopicConfig3; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cleanupPolicy = reader.int32() as any; + break; + case 2: + message.compressionType = reader.int32() as any; + break; + case 3: + message.deleteRetentionMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.fileDeleteDelayMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.flushMessages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.flushMs = Int64Value.decode(reader, reader.uint32()).value; + break; + case 7: + message.minCompactionLagMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.retentionBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.retentionMs = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.maxMessageBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.minInsyncReplicas = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.segmentBytes = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.preallocate = BoolValue.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TopicConfig3 { + const message = { ...baseTopicConfig3 } as TopicConfig3; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? topicConfig3_CleanupPolicyFromJSON(object.cleanupPolicy) + : 0; + message.compressionType = + object.compressionType !== undefined && object.compressionType !== null + ? compressionTypeFromJSON(object.compressionType) + : 0; + message.deleteRetentionMs = + object.deleteRetentionMs !== undefined && + object.deleteRetentionMs !== null + ? Number(object.deleteRetentionMs) + : undefined; + message.fileDeleteDelayMs = + object.fileDeleteDelayMs !== undefined && + object.fileDeleteDelayMs !== null + ? Number(object.fileDeleteDelayMs) + : undefined; + message.flushMessages = + object.flushMessages !== undefined && object.flushMessages !== null + ? Number(object.flushMessages) + : undefined; + message.flushMs = + object.flushMs !== undefined && object.flushMs !== null + ? Number(object.flushMs) + : undefined; + message.minCompactionLagMs = + object.minCompactionLagMs !== undefined && + object.minCompactionLagMs !== null + ? Number(object.minCompactionLagMs) + : undefined; + message.retentionBytes = + object.retentionBytes !== undefined && object.retentionBytes !== null + ? Number(object.retentionBytes) + : undefined; + message.retentionMs = + object.retentionMs !== undefined && object.retentionMs !== null + ? Number(object.retentionMs) + : undefined; + message.maxMessageBytes = + object.maxMessageBytes !== undefined && object.maxMessageBytes !== null + ? 
Number(object.maxMessageBytes) + : undefined; + message.minInsyncReplicas = + object.minInsyncReplicas !== undefined && + object.minInsyncReplicas !== null + ? Number(object.minInsyncReplicas) + : undefined; + message.segmentBytes = + object.segmentBytes !== undefined && object.segmentBytes !== null + ? Number(object.segmentBytes) + : undefined; + message.preallocate = + object.preallocate !== undefined && object.preallocate !== null + ? Boolean(object.preallocate) + : undefined; + return message; + }, + + toJSON(message: TopicConfig3): unknown { + const obj: any = {}; + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = topicConfig3_CleanupPolicyToJSON( + message.cleanupPolicy + )); + message.compressionType !== undefined && + (obj.compressionType = compressionTypeToJSON(message.compressionType)); + message.deleteRetentionMs !== undefined && + (obj.deleteRetentionMs = message.deleteRetentionMs); + message.fileDeleteDelayMs !== undefined && + (obj.fileDeleteDelayMs = message.fileDeleteDelayMs); + message.flushMessages !== undefined && + (obj.flushMessages = message.flushMessages); + message.flushMs !== undefined && (obj.flushMs = message.flushMs); + message.minCompactionLagMs !== undefined && + (obj.minCompactionLagMs = message.minCompactionLagMs); + message.retentionBytes !== undefined && + (obj.retentionBytes = message.retentionBytes); + message.retentionMs !== undefined && + (obj.retentionMs = message.retentionMs); + message.maxMessageBytes !== undefined && + (obj.maxMessageBytes = message.maxMessageBytes); + message.minInsyncReplicas !== undefined && + (obj.minInsyncReplicas = message.minInsyncReplicas); + message.segmentBytes !== undefined && + (obj.segmentBytes = message.segmentBytes); + message.preallocate !== undefined && + (obj.preallocate = message.preallocate); + return obj; + }, + + fromPartial, I>>( + object: I + ): TopicConfig3 { + const message = { ...baseTopicConfig3 } as TopicConfig3; + message.cleanupPolicy = object.cleanupPolicy ?? 0; + message.compressionType = object.compressionType ?? 0; + message.deleteRetentionMs = object.deleteRetentionMs ?? undefined; + message.fileDeleteDelayMs = object.fileDeleteDelayMs ?? undefined; + message.flushMessages = object.flushMessages ?? undefined; + message.flushMs = object.flushMs ?? undefined; + message.minCompactionLagMs = object.minCompactionLagMs ?? undefined; + message.retentionBytes = object.retentionBytes ?? undefined; + message.retentionMs = object.retentionMs ?? undefined; + message.maxMessageBytes = object.maxMessageBytes ?? undefined; + message.minInsyncReplicas = object.minInsyncReplicas ?? undefined; + message.segmentBytes = object.segmentBytes ?? undefined; + message.preallocate = object.preallocate ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(TopicConfig3.$type, TopicConfig3); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/topic_service.ts b/src/generated/yandex/cloud/mdb/kafka/v1/topic_service.ts index 204f4ab2..67c41e13 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/topic_service.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/topic_service.ts @@ -56,7 +56,7 @@ export interface ListTopicsRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListTopicsResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListTopicsResponse.next_page_token] returned by the previous list request. 
*/ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/user_service.ts b/src/generated/yandex/cloud/mdb/kafka/v1/user_service.ts index 2e952686..acfc0980 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/user_service.ts @@ -57,7 +57,7 @@ export interface ListUsersRequest { /** * Page token. * - * To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by a previous list request. + * To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/backup.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/backup.ts index 8761d58c..d56d301d 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/backup.ts @@ -12,7 +12,7 @@ export const protobufPackage = "yandex.cloud.mdb.mongodb.v1"; */ export interface Backup { $type: "yandex.cloud.mdb.mongodb.v1.Backup"; - /** ID of the backup. */ + /** ID of the backup. Required. */ id: string; /** ID of the folder that the backup belongs to. */ folderId: string; diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/backup_service.ts index 90cbc49d..13b6c086 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/backup_service.ts @@ -35,10 +35,15 @@ export interface ListBackupsRequest { * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListBackupsResponse.next_page_token] returned by a previous list request. + * [ListBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts index e889a4fa..4eabf42e 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts @@ -42,6 +42,16 @@ import { Mongocfgconfigset50Enterprise, Mongosconfigset50Enterprise, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise"; +import { + Mongodconfigset60, + Mongocfgconfigset60, + Mongosconfigset60, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb6_0"; +import { + Mongodconfigset60Enterprise, + Mongocfgconfigset60Enterprise, + Mongosconfigset60Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb6_0_enterprise"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Int64Value } from "../../../../../google/protobuf/wrappers"; @@ -92,6 +102,7 @@ export interface Cluster { deletionProtection: boolean; } +/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -281,7 +292,7 @@ export interface Monitoring { export interface ClusterConfig { $type: "yandex.cloud.mdb.mongodb.v1.ClusterConfig"; - /** Version of MongoDB server software. 
Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`. */ + /** Version of MongoDB server software. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`, `6.0`, `6.0-enterprise`. */ version: string; /** * MongoDB feature compatibility version. See usage details in [MongoDB documentation](https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/). @@ -292,6 +303,7 @@ export interface ClusterConfig { * * `4.2` - persist data compatibility for version 4.2. After setting this option the data will not be compatible with 4.0 or lower. * * `4.4` - persist data compatibility for version 4.4. After setting this option the data will not be compatible with 4.2 or lower. * * `5.0` - persist data compatibility for version 5.0. After setting this option the data will not be compatible with 5.0 or lower. + * * `6.0` - persist data compatibility for version 6.0. After setting this option the data will not be compatible with 6.0 or lower. */ featureCompatibilityVersion: string; /** Configuration and resource allocation for a MongoDB 3.6 cluster. */ @@ -304,14 +316,20 @@ export interface ClusterConfig { mongodb44?: Mongodb44 | undefined; /** Configuration and resource allocation for a MongoDB 5.0 cluster. */ mongodb50?: Mongodb50 | undefined; + /** Configuration and resource allocation for a MongoDB 6.0 cluster. */ + mongodb60?: Mongodb60 | undefined; /** Configuration and resource allocation for a MongoDB 4.4 Enterprise cluster. */ mongodb44Enterprise?: Mongodb44Enterprise | undefined; /** Configuration and resource allocation for a MongoDB 5.0 Enterprise cluster. */ mongodb50Enterprise?: Mongodb50Enterprise | undefined; + /** Configuration and resource allocation for a MongoDB 6.0 Enterprise cluster. */ + mongodb60Enterprise?: Mongodb60Enterprise | undefined; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Retain period of automatically created backup in days */ backupRetainPeriodDays?: number; + /** Performance Diagnostic */ + performanceDiagnostics?: PerformanceDiagnosticsConfig; /** Access policy to DB */ access?: Access; } @@ -346,7 +364,7 @@ export interface Mongodb36_MongoCfg { export interface Mongodb36_Mongos { $type: "yandex.cloud.mdb.mongodb.v1.Mongodb3_6.Mongos"; config?: Mongosconfigset36; - /** Resources allocated to mongocfg hosts. */ + /** Resources allocated to mongos hosts. */ resources?: Resources; } @@ -622,6 +640,94 @@ export interface Mongodb50Enterprise_MongoInfra { resources?: Resources; } +export interface Mongodb60 { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0"; + /** Configuration and resource allocation for mongod in a MongoDB 6.0 cluster. */ + mongod?: Mongodb60_Mongod; + /** Configuration and resource allocation for mongocfg in a MongoDB 6.0 cluster. */ + mongocfg?: Mongodb60_MongoCfg; + /** Configuration and resource allocation for mongos in a MongoDB 6.0 cluster. */ + mongos?: Mongodb60_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) in a MongoDB 6.0 cluster. */ + mongoinfra?: Mongodb60_MongoInfra; +} + +export interface Mongodb60_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.Mongod"; + /** Configuration for mongod 6.0 hosts. */ + config?: Mongodconfigset60; + /** Resources allocated to mongod hosts. */ + resources?: Resources; +} + +export interface Mongodb60_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.MongoCfg"; + /** Configuration for mongocfg 6.0 hosts. 
*/ + config?: Mongocfgconfigset60; + /** Resources allocated to mongocfg hosts. */ + resources?: Resources; +} + +export interface Mongodb60_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.Mongos"; + /** Configuration for mongos 6.0 hosts. */ + config?: Mongosconfigset60; + /** Resources allocated to mongos hosts. */ + resources?: Resources; +} + +export interface Mongodb60_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.MongoInfra"; + configMongos?: Mongosconfigset60; + configMongocfg?: Mongocfgconfigset60; + /** Resources allocated to mongoinfra (mongos+mongocfg) hosts. */ + resources?: Resources; +} + +export interface Mongodb60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise"; + /** Configuration and resource allocation for mongod in a MongoDB 6.0 cluster. */ + mongod?: Mongodb60Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg in a MongoDB 6.0 cluster. */ + mongocfg?: Mongodb60Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos in a MongoDB 6.0 cluster. */ + mongos?: Mongodb60Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) in a MongoDB 6.0 cluster. */ + mongoinfra?: Mongodb60Enterprise_MongoInfra; +} + +export interface Mongodb60Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.Mongod"; + /** Configuration for mongod 6.0 hosts. */ + config?: Mongodconfigset60Enterprise; + /** Resources allocated to mongod hosts. */ + resources?: Resources; +} + +export interface Mongodb60Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.MongoCfg"; + /** Configuration for mongocfg 6.0 hosts. */ + config?: Mongocfgconfigset60Enterprise; + /** Resources allocated to mongocfg hosts. */ + resources?: Resources; +} + +export interface Mongodb60Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.Mongos"; + /** Configuration for mongos 6.0 hosts. */ + config?: Mongosconfigset60Enterprise; + /** Resources allocated to mongos hosts. */ + resources?: Resources; +} + +export interface Mongodb60Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.MongoInfra"; + configMongos?: Mongosconfigset60Enterprise; + configMongocfg?: Mongocfgconfigset60Enterprise; + /** Resources allocated to mongoinfra (mongos+mongocfg) hosts. */ + resources?: Resources; +} + export interface Shard { $type: "yandex.cloud.mdb.mongodb.v1.Shard"; /** Name of the shard. */ @@ -636,7 +742,7 @@ export interface Host { * Name of the MongoDB host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. */ name: string; /** ID of the MongoDB host. The ID is assigned by MDB at creation time. */ @@ -921,12 +1027,17 @@ export interface Resources { export interface Access { $type: "yandex.cloud.mdb.mongodb.v1.Access"; - /** Allow access for DataLens */ + /** Allow access for DataLens. */ dataLens: boolean; /** Allow access for DataTransfer. 
*/ dataTransfer: boolean; } +export interface PerformanceDiagnosticsConfig { + $type: "yandex.cloud.mdb.mongodb.v1.PerformanceDiagnosticsConfig"; + profilingEnabled: boolean; +} + const baseCluster: object = { $type: "yandex.cloud.mdb.mongodb.v1.Cluster", id: "", @@ -1463,6 +1574,9 @@ export const ClusterConfig = { if (message.mongodb50 !== undefined) { Mongodb50.encode(message.mongodb50, writer.uint32(82).fork()).ldelim(); } + if (message.mongodb60 !== undefined) { + Mongodb60.encode(message.mongodb60, writer.uint32(114).fork()).ldelim(); + } if (message.mongodb44Enterprise !== undefined) { Mongodb44Enterprise.encode( message.mongodb44Enterprise, @@ -1475,6 +1589,12 @@ export const ClusterConfig = { writer.uint32(98).fork() ).ldelim(); } + if (message.mongodb60Enterprise !== undefined) { + Mongodb60Enterprise.encode( + message.mongodb60Enterprise, + writer.uint32(122).fork() + ).ldelim(); + } if (message.backupWindowStart !== undefined) { TimeOfDay.encode( message.backupWindowStart, @@ -1490,6 +1610,12 @@ export const ClusterConfig = { writer.uint32(74).fork() ).ldelim(); } + if (message.performanceDiagnostics !== undefined) { + PerformanceDiagnosticsConfig.encode( + message.performanceDiagnostics, + writer.uint32(106).fork() + ).ldelim(); + } if (message.access !== undefined) { Access.encode(message.access, writer.uint32(50).fork()).ldelim(); } @@ -1524,6 +1650,9 @@ export const ClusterConfig = { case 10: message.mongodb50 = Mongodb50.decode(reader, reader.uint32()); break; + case 14: + message.mongodb60 = Mongodb60.decode(reader, reader.uint32()); + break; case 11: message.mongodb44Enterprise = Mongodb44Enterprise.decode( reader, @@ -1536,6 +1665,12 @@ export const ClusterConfig = { reader.uint32() ); break; + case 15: + message.mongodb60Enterprise = Mongodb60Enterprise.decode( + reader, + reader.uint32() + ); + break; case 3: message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); break; @@ -1545,6 +1680,12 @@ export const ClusterConfig = { reader.uint32() ).value; break; + case 13: + message.performanceDiagnostics = PerformanceDiagnosticsConfig.decode( + reader, + reader.uint32() + ); + break; case 6: message.access = Access.decode(reader, reader.uint32()); break; @@ -1587,6 +1728,10 @@ export const ClusterConfig = { object.mongodb_5_0 !== undefined && object.mongodb_5_0 !== null ? Mongodb50.fromJSON(object.mongodb_5_0) : undefined; + message.mongodb60 = + object.mongodb_6_0 !== undefined && object.mongodb_6_0 !== null + ? Mongodb60.fromJSON(object.mongodb_6_0) + : undefined; message.mongodb44Enterprise = object.mongodb_4_4_enterprise !== undefined && object.mongodb_4_4_enterprise !== null @@ -1597,6 +1742,11 @@ export const ClusterConfig = { object.mongodb_5_0_enterprise !== null ? Mongodb50Enterprise.fromJSON(object.mongodb_5_0_enterprise) : undefined; + message.mongodb60Enterprise = + object.mongodb_6_0_enterprise !== undefined && + object.mongodb_6_0_enterprise !== null + ? Mongodb60Enterprise.fromJSON(object.mongodb_6_0_enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -1607,6 +1757,11 @@ export const ClusterConfig = { object.backupRetainPeriodDays !== null ? Number(object.backupRetainPeriodDays) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? 
PerformanceDiagnosticsConfig.fromJSON(object.performanceDiagnostics) + : undefined; message.access = object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) @@ -1639,6 +1794,10 @@ export const ClusterConfig = { (obj.mongodb_5_0 = message.mongodb50 ? Mongodb50.toJSON(message.mongodb50) : undefined); + message.mongodb60 !== undefined && + (obj.mongodb_6_0 = message.mongodb60 + ? Mongodb60.toJSON(message.mongodb60) + : undefined); message.mongodb44Enterprise !== undefined && (obj.mongodb_4_4_enterprise = message.mongodb44Enterprise ? Mongodb44Enterprise.toJSON(message.mongodb44Enterprise) @@ -1647,12 +1806,20 @@ export const ClusterConfig = { (obj.mongodb_5_0_enterprise = message.mongodb50Enterprise ? Mongodb50Enterprise.toJSON(message.mongodb50Enterprise) : undefined); + message.mongodb60Enterprise !== undefined && + (obj.mongodb_6_0_enterprise = message.mongodb60Enterprise + ? Mongodb60Enterprise.toJSON(message.mongodb60Enterprise) + : undefined); message.backupWindowStart !== undefined && (obj.backupWindowStart = message.backupWindowStart ? TimeOfDay.toJSON(message.backupWindowStart) : undefined); message.backupRetainPeriodDays !== undefined && (obj.backupRetainPeriodDays = message.backupRetainPeriodDays); + message.performanceDiagnostics !== undefined && + (obj.performanceDiagnostics = message.performanceDiagnostics + ? PerformanceDiagnosticsConfig.toJSON(message.performanceDiagnostics) + : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); return obj; @@ -1685,6 +1852,10 @@ export const ClusterConfig = { object.mongodb50 !== undefined && object.mongodb50 !== null ? Mongodb50.fromPartial(object.mongodb50) : undefined; + message.mongodb60 = + object.mongodb60 !== undefined && object.mongodb60 !== null + ? Mongodb60.fromPartial(object.mongodb60) + : undefined; message.mongodb44Enterprise = object.mongodb44Enterprise !== undefined && object.mongodb44Enterprise !== null @@ -1695,12 +1866,24 @@ export const ClusterConfig = { object.mongodb50Enterprise !== null ? Mongodb50Enterprise.fromPartial(object.mongodb50Enterprise) : undefined; + message.mongodb60Enterprise = + object.mongodb60Enterprise !== undefined && + object.mongodb60Enterprise !== null + ? Mongodb60Enterprise.fromPartial(object.mongodb60Enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null ? TimeOfDay.fromPartial(object.backupWindowStart) : undefined; message.backupRetainPeriodDays = object.backupRetainPeriodDays ?? undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnosticsConfig.fromPartial( + object.performanceDiagnostics + ) + : undefined; message.access = object.access !== undefined && object.access !== null ? 
Access.fromPartial(object.access) @@ -5465,37 +5648,65 @@ messageTypeRegistry.set( Mongodb50Enterprise_MongoInfra ); -const baseShard: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Shard", - name: "", - clusterId: "", +const baseMongodb60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0", }; -export const Shard = { - $type: "yandex.cloud.mdb.mongodb.v1.Shard" as const, +export const Mongodb60 = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0" as const, - encode(message: Shard, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); + encode( + message: Mongodb60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodb60_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); } - if (message.clusterId !== "") { - writer.uint32(18).string(message.clusterId); + if (message.mongocfg !== undefined) { + Mongodb60_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodb60_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodb60_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Shard { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb60 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseShard } as Shard; + const message = { ...baseMongodb60 } as Mongodb60; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.name = reader.string(); + message.mongod = Mongodb60_Mongod.decode(reader, reader.uint32()); break; case 2: - message.clusterId = reader.string(); + message.mongocfg = Mongodb60_MongoCfg.decode(reader, reader.uint32()); + break; + case 3: + message.mongos = Mongodb60_Mongos.decode(reader, reader.uint32()); + break; + case 4: + message.mongoinfra = Mongodb60_MongoInfra.decode( + reader, + reader.uint32() + ); break; default: reader.skipType(tag & 7); @@ -5505,130 +5716,110 @@ export const Shard = { return message; }, - fromJSON(object: any): Shard { - const message = { ...baseShard } as Shard; - message.name = - object.name !== undefined && object.name !== null - ? String(object.name) - : ""; - message.clusterId = - object.clusterId !== undefined && object.clusterId !== null - ? String(object.clusterId) - : ""; + fromJSON(object: any): Mongodb60 { + const message = { ...baseMongodb60 } as Mongodb60; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb60_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb60_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb60_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? 
Mongodb60_MongoInfra.fromJSON(object.mongoinfra) + : undefined; return message; }, - toJSON(message: Shard): unknown { + toJSON(message: Mongodb60): unknown { const obj: any = {}; - message.name !== undefined && (obj.name = message.name); - message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodb60_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodb60_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodb60_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodb60_MongoInfra.toJSON(message.mongoinfra) + : undefined); return obj; }, - fromPartial, I>>(object: I): Shard { - const message = { ...baseShard } as Shard; - message.name = object.name ?? ""; - message.clusterId = object.clusterId ?? ""; + fromPartial, I>>( + object: I + ): Mongodb60 { + const message = { ...baseMongodb60 } as Mongodb60; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb60_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb60_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb60_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb60_MongoInfra.fromPartial(object.mongoinfra) + : undefined; return message; }, }; -messageTypeRegistry.set(Shard.$type, Shard); +messageTypeRegistry.set(Mongodb60.$type, Mongodb60); -const baseHost: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Host", - name: "", - clusterId: "", - zoneId: "", - role: 0, - health: 0, - subnetId: "", - assignPublicIp: false, - shardName: "", - type: 0, +const baseMongodb60_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.Mongod", }; -export const Host = { - $type: "yandex.cloud.mdb.mongodb.v1.Host" as const, +export const Mongodb60_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.Mongod" as const, - encode(message: Host, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.clusterId !== "") { - writer.uint32(18).string(message.clusterId); - } - if (message.zoneId !== "") { - writer.uint32(26).string(message.zoneId); + encode( + message: Mongodb60_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfigset60.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); } if (message.resources !== undefined) { - Resources.encode(message.resources, writer.uint32(34).fork()).ldelim(); - } - if (message.role !== 0) { - writer.uint32(40).int32(message.role); - } - if (message.health !== 0) { - writer.uint32(48).int32(message.health); - } - for (const v of message.services) { - Service.encode(v!, writer.uint32(58).fork()).ldelim(); - } - if (message.subnetId !== "") { - writer.uint32(66).string(message.subnetId); - } - if (message.assignPublicIp === true) { - writer.uint32(72).bool(message.assignPublicIp); - } - if (message.shardName !== "") { - writer.uint32(82).string(message.shardName); - } - if (message.type !== 0) { - 
writer.uint32(88).int32(message.type); + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Host { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb60_Mongod { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseHost } as Host; - message.services = []; + const message = { ...baseMongodb60_Mongod } as Mongodb60_Mongod; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.name = reader.string(); + message.config = Mongodconfigset60.decode(reader, reader.uint32()); break; case 2: - message.clusterId = reader.string(); - break; - case 3: - message.zoneId = reader.string(); - break; - case 4: message.resources = Resources.decode(reader, reader.uint32()); break; - case 5: - message.role = reader.int32() as any; - break; - case 6: - message.health = reader.int32() as any; - break; - case 7: - message.services.push(Service.decode(reader, reader.uint32())); - break; - case 8: - message.subnetId = reader.string(); - break; - case 9: - message.assignPublicIp = reader.bool(); - break; - case 10: - message.shardName = reader.string(); - break; - case 11: - message.type = reader.int32() as any; - break; default: reader.skipType(tag & 7); break; @@ -5637,12 +5828,1104 @@ export const Host = { return message; }, - fromJSON(object: any): Host { - const message = { ...baseHost } as Host; - message.name = - object.name !== undefined && object.name !== null - ? String(object.name) - : ""; + fromJSON(object: any): Mongodb60_Mongod { + const message = { ...baseMongodb60_Mongod } as Mongodb60_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset60.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfigset60.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60_Mongod { + const message = { ...baseMongodb60_Mongod } as Mongodb60_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset60.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb60_Mongod.$type, Mongodb60_Mongod); + +const baseMongodb60_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.MongoCfg", +}; + +export const Mongodb60_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.MongoCfg" as const, + + encode( + message: Mongodb60_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfigset60.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb60_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb60_MongoCfg } as Mongodb60_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfigset60.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60_MongoCfg { + const message = { ...baseMongodb60_MongoCfg } as Mongodb60_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset60.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfigset60.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60_MongoCfg { + const message = { ...baseMongodb60_MongoCfg } as Mongodb60_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset60.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb60_MongoCfg.$type, Mongodb60_MongoCfg); + +const baseMongodb60_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.Mongos", +}; + +export const Mongodb60_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.Mongos" as const, + + encode( + message: Mongodb60_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfigset60.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb60_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb60_Mongos } as Mongodb60_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfigset60.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60_Mongos { + const message = { ...baseMongodb60_Mongos } as Mongodb60_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset60.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfigset60.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60_Mongos { + const message = { ...baseMongodb60_Mongos } as Mongodb60_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset60.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb60_Mongos.$type, Mongodb60_Mongos); + +const baseMongodb60_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.MongoInfra", +}; + +export const Mongodb60_MongoInfra = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0.MongoInfra" as const, + + encode( + message: Mongodb60_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfigset60.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfigset60.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb60_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb60_MongoInfra } as Mongodb60_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfigset60.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfigset60.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60_MongoInfra { + const message = { ...baseMongodb60_MongoInfra } as Mongodb60_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? 
Mongosconfigset60.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset60.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfigset60.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfigset60.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60_MongoInfra { + const message = { ...baseMongodb60_MongoInfra } as Mongodb60_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset60.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset60.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb60_MongoInfra.$type, Mongodb60_MongoInfra); + +const baseMongodb60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise", +}; + +export const Mongodb60Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise" as const, + + encode( + message: Mongodb60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodb60Enterprise_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.mongocfg !== undefined) { + Mongodb60Enterprise_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodb60Enterprise_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodb60Enterprise_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb60Enterprise } as Mongodb60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodb60Enterprise_Mongod.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.mongocfg = Mongodb60Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodb60Enterprise_Mongos.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.mongoinfra = Mongodb60Enterprise_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60Enterprise { + const message = { ...baseMongodb60Enterprise } as Mongodb60Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb60Enterprise_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb60Enterprise_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb60Enterprise_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb60Enterprise_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodb60Enterprise): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodb60Enterprise_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodb60Enterprise_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodb60Enterprise_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodb60Enterprise_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60Enterprise { + const message = { ...baseMongodb60Enterprise } as Mongodb60Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb60Enterprise_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb60Enterprise_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb60Enterprise_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? 
Mongodb60Enterprise_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb60Enterprise.$type, Mongodb60Enterprise); + +const baseMongodb60Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.Mongod", +}; + +export const Mongodb60Enterprise_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.Mongod" as const, + + encode( + message: Mongodb60Enterprise_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfigset60Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb60Enterprise_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb60Enterprise_Mongod, + } as Mongodb60Enterprise_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfigset60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60Enterprise_Mongod { + const message = { + ...baseMongodb60Enterprise_Mongod, + } as Mongodb60Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset60Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60Enterprise_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfigset60Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60Enterprise_Mongod { + const message = { + ...baseMongodb60Enterprise_Mongod, + } as Mongodb60Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset60Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb60Enterprise_Mongod.$type, + Mongodb60Enterprise_Mongod +); + +const baseMongodb60Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.MongoCfg", +}; + +export const Mongodb60Enterprise_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.MongoCfg" as const, + + encode( + message: Mongodb60Enterprise_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfigset60Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb60Enterprise_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb60Enterprise_MongoCfg, + } as Mongodb60Enterprise_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfigset60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60Enterprise_MongoCfg { + const message = { + ...baseMongodb60Enterprise_MongoCfg, + } as Mongodb60Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset60Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60Enterprise_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfigset60Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60Enterprise_MongoCfg { + const message = { + ...baseMongodb60Enterprise_MongoCfg, + } as Mongodb60Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset60Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb60Enterprise_MongoCfg.$type, + Mongodb60Enterprise_MongoCfg +); + +const baseMongodb60Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.Mongos", +}; + +export const Mongodb60Enterprise_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.Mongos" as const, + + encode( + message: Mongodb60Enterprise_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfigset60Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb60Enterprise_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb60Enterprise_Mongos, + } as Mongodb60Enterprise_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfigset60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60Enterprise_Mongos { + const message = { + ...baseMongodb60Enterprise_Mongos, + } as Mongodb60Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset60Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60Enterprise_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfigset60Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60Enterprise_Mongos { + const message = { + ...baseMongodb60Enterprise_Mongos, + } as Mongodb60Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset60Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb60Enterprise_Mongos.$type, + Mongodb60Enterprise_Mongos +); + +const baseMongodb60Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.MongoInfra", +}; + +export const Mongodb60Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.Mongodb6_0_enterprise.MongoInfra" as const, + + encode( + message: Mongodb60Enterprise_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfigset60Enterprise.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfigset60Enterprise.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb60Enterprise_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb60Enterprise_MongoInfra, + } as Mongodb60Enterprise_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfigset60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfigset60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb60Enterprise_MongoInfra { + const message = { + ...baseMongodb60Enterprise_MongoInfra, + } as Mongodb60Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset60Enterprise.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset60Enterprise.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb60Enterprise_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfigset60Enterprise.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfigset60Enterprise.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb60Enterprise_MongoInfra { + const message = { + ...baseMongodb60Enterprise_MongoInfra, + } as Mongodb60Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset60Enterprise.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? 
Mongocfgconfigset60Enterprise.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb60Enterprise_MongoInfra.$type, + Mongodb60Enterprise_MongoInfra +); + +const baseShard: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Shard", + name: "", + clusterId: "", +}; + +export const Shard = { + $type: "yandex.cloud.mdb.mongodb.v1.Shard" as const, + + encode(message: Shard, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Shard { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseShard } as Shard; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Shard { + const message = { ...baseShard } as Shard; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: Shard): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>(object: I): Shard { + const message = { ...baseShard } as Shard; + message.name = object.name ?? ""; + message.clusterId = object.clusterId ?? 
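// A brief usage sketch of the generated codec methods defined around here, assuming the
// standard protobufjs Writer.finish(); Shard's fromPartial/encode/decode/toJSON are the
// ones generated in this file, and the field values below are placeholders.
const exampleShard = Shard.fromPartial({ name: "rs01", clusterId: "example-cluster-id" });
const shardBytes = Shard.encode(exampleShard).finish(); // serialize to a Uint8Array
const decodedShard = Shard.decode(shardBytes);          // parse the bytes back into a Shard
console.log(Shard.toJSON(decodedShard));                // plain JSON-compatible object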
""; + return message; + }, +}; + +messageTypeRegistry.set(Shard.$type, Shard); + +const baseHost: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Host", + name: "", + clusterId: "", + zoneId: "", + role: 0, + health: 0, + subnetId: "", + assignPublicIp: false, + shardName: "", + type: 0, +}; + +export const Host = { + $type: "yandex.cloud.mdb.mongodb.v1.Host" as const, + + encode(message: Host, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + if (message.zoneId !== "") { + writer.uint32(26).string(message.zoneId); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(34).fork()).ldelim(); + } + if (message.role !== 0) { + writer.uint32(40).int32(message.role); + } + if (message.health !== 0) { + writer.uint32(48).int32(message.health); + } + for (const v of message.services) { + Service.encode(v!, writer.uint32(58).fork()).ldelim(); + } + if (message.subnetId !== "") { + writer.uint32(66).string(message.subnetId); + } + if (message.assignPublicIp === true) { + writer.uint32(72).bool(message.assignPublicIp); + } + if (message.shardName !== "") { + writer.uint32(82).string(message.shardName); + } + if (message.type !== 0) { + writer.uint32(88).int32(message.type); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Host { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseHost } as Host; + message.services = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.clusterId = reader.string(); + break; + case 3: + message.zoneId = reader.string(); + break; + case 4: + message.resources = Resources.decode(reader, reader.uint32()); + break; + case 5: + message.role = reader.int32() as any; + break; + case 6: + message.health = reader.int32() as any; + break; + case 7: + message.services.push(Service.decode(reader, reader.uint32())); + break; + case 8: + message.subnetId = reader.string(); + break; + case 9: + message.assignPublicIp = reader.bool(); + break; + case 10: + message.shardName = reader.string(); + break; + case 11: + message.type = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Host { + const message = { ...baseHost } as Host; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; message.clusterId = object.clusterId !== undefined && object.clusterId !== null ? 
String(object.clusterId) @@ -5973,6 +7256,81 @@ export const Access = { messageTypeRegistry.set(Access.$type, Access); +const basePerformanceDiagnosticsConfig: object = { + $type: "yandex.cloud.mdb.mongodb.v1.PerformanceDiagnosticsConfig", + profilingEnabled: false, +}; + +export const PerformanceDiagnosticsConfig = { + $type: "yandex.cloud.mdb.mongodb.v1.PerformanceDiagnosticsConfig" as const, + + encode( + message: PerformanceDiagnosticsConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.profilingEnabled === true) { + writer.uint32(8).bool(message.profilingEnabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PerformanceDiagnosticsConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePerformanceDiagnosticsConfig, + } as PerformanceDiagnosticsConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.profilingEnabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PerformanceDiagnosticsConfig { + const message = { + ...basePerformanceDiagnosticsConfig, + } as PerformanceDiagnosticsConfig; + message.profilingEnabled = + object.profilingEnabled !== undefined && object.profilingEnabled !== null + ? Boolean(object.profilingEnabled) + : false; + return message; + }, + + toJSON(message: PerformanceDiagnosticsConfig): unknown { + const obj: any = {}; + message.profilingEnabled !== undefined && + (obj.profilingEnabled = message.profilingEnabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): PerformanceDiagnosticsConfig { + const message = { + ...basePerformanceDiagnosticsConfig, + } as PerformanceDiagnosticsConfig; + message.profilingEnabled = object.profilingEnabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set( + PerformanceDiagnosticsConfig.$type, + PerformanceDiagnosticsConfig +); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts index ee25b5a5..03c392c8 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts @@ -20,6 +20,7 @@ import { Cluster_Environment, Resources, Host_Type, + PerformanceDiagnosticsConfig, Access, Cluster, Host, @@ -66,6 +67,16 @@ import { Mongocfgconfig50Enterprise, Mongosconfig50Enterprise, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise"; +import { + Mongodconfig60, + Mongocfgconfig60, + Mongosconfig60, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb6_0"; +import { + Mongodconfig60Enterprise, + Mongocfgconfig60Enterprise, + Mongosconfig60Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb6_0_enterprise"; import { TimeOfDay } from "../../../../../google/type/timeofday"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { DatabaseSpec } from "../../../../../yandex/cloud/mdb/mongodb/v1/database"; @@ -92,10 +103,16 @@ export interface ListClustersRequest { * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. 
*/ folderId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] - * to the [ListClustersResponse.next_page_token] returned by a previous list request. + * to the [ListClustersResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** @@ -286,7 +303,10 @@ export interface RestoreClusterRequest { * To get the backup ID, use a [ClusterService.ListBackups] request. */ backupId: string; - /** Name of the new MongoDB cluster. The name must be unique within the folder. */ + /** + * Name of the new MongoDB cluster. The name must be unique within the folder. + * The name can't be changed after the MongoDB cluster is created. + */ name: string; /** Description of the new MongoDB cluster. */ description: string; @@ -312,6 +332,8 @@ export interface RestoreClusterRequest { recoveryTargetSpec?: RestoreClusterRequest_RecoveryTargetSpec; /** User security groups */ securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; } export interface RestoreClusterRequest_LabelsEntry { @@ -436,10 +458,16 @@ export interface ListClusterLogsRequest { fromTime?: Date; /** End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ toTime?: Date; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -618,10 +646,16 @@ export interface ListClusterOperationsRequest { $type: "yandex.cloud.mdb.mongodb.v1.ListClusterOperationsRequest"; /** ID of the MongoDB Cluster resource to list operations for. */ clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterOperationsResponse.next_page_token] returned by a previous list request. + * [ListClusterOperationsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -646,10 +680,16 @@ export interface ListClusterBackupsRequest { * To get the MongoDB cluster ID, use a [ClusterService.List] request. */ clusterId: string; + /** + * The maximum number of results per page to return. 
If the number of available + * results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterBackupsResponse.next_page_token] returned by a previous list request. + * [ListClusterBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -674,10 +714,16 @@ export interface ListClusterHostsRequest { * To get the MongoDB cluster ID, use a [ClusterService.List] request. */ clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterHostsResponse.next_page_token] returned by a previous list request. + * [ListClusterHostsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -800,7 +846,7 @@ export interface ListClusterShardsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterShardsResponse.next_page_token] returned by a previous list request. + * [ListClusterShardsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -1254,9 +1300,99 @@ export interface Mongodbspec50Enterprise_MongoInfra { resources?: Resources; } +export interface Mongodbspec60 { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0"; + /** Configuration and resource allocation for mongod 6.0 hosts. */ + mongod?: Mongodbspec60_Mongod; + /** Configuration and resource allocation for mongocfg 6.0 hosts. */ + mongocfg?: Mongodbspec60_MongoCfg; + /** Configuration and resource allocation for mongos 6.0 hosts. */ + mongos?: Mongodbspec60_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) 6.0 hosts. */ + mongoinfra?: Mongodbspec60_MongoInfra; +} + +export interface Mongodbspec60_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.Mongod"; + /** Configuration for mongod 6.0 hosts. */ + config?: Mongodconfig60; + /** Resources allocated to each mongod host. */ + resources?: Resources; +} + +export interface Mongodbspec60_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.MongoCfg"; + /** Configuration for mongocfg 6.0 hosts. */ + config?: Mongocfgconfig60; + /** Resources allocated to each mongocfg host. */ + resources?: Resources; +} + +export interface Mongodbspec60_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.Mongos"; + /** Configuration for mongos 6.0 hosts. */ + config?: Mongosconfig60; + /** Resources allocated to each mongos host. */ + resources?: Resources; +} + +export interface Mongodbspec60_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.MongoInfra"; + /** Configuration for mongoinfra 6.0 hosts. */ + configMongos?: Mongosconfig60; + configMongocfg?: Mongocfgconfig60; + /** Resources allocated to each mongoinfra (mongos+mongocfg) host. 
*/ + resources?: Resources; +} + +export interface Mongodbspec60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise"; + /** Configuration and resource allocation for mongod 6.0 hosts. */ + mongod?: Mongodbspec60Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg 6.0 hosts. */ + mongocfg?: Mongodbspec60Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos 6.0 hosts. */ + mongos?: Mongodbspec60Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) 6.0 hosts. */ + mongoinfra?: Mongodbspec60Enterprise_MongoInfra; +} + +export interface Mongodbspec60Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.Mongod"; + /** Configuration for mongod 6.0 hosts. */ + config?: Mongodconfig60Enterprise; + /** Resources allocated to each mongod host. */ + resources?: Resources; +} + +export interface Mongodbspec60Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.MongoCfg"; + /** Configuration for mongocfg 6.0 hosts. */ + config?: Mongocfgconfig60Enterprise; + /** Resources allocated to each mongocfg host. */ + resources?: Resources; +} + +export interface Mongodbspec60Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.Mongos"; + /** Configuration for mongos 6.0 hosts. */ + config?: Mongosconfig60Enterprise; + /** Resources allocated to each mongos host. */ + resources?: Resources; +} + +export interface Mongodbspec60Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.MongoInfra"; + /** Configuration for mongoinfra 6.0 hosts. */ + configMongos?: Mongosconfig60Enterprise; + configMongocfg?: Mongocfgconfig60Enterprise; + /** Resources allocated to each mongoinfra (mongos+mongocfg) host. */ + resources?: Resources; +} + export interface ConfigSpec { $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec"; - /** Version of MongoDB used in the cluster. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`. */ + /** Version of MongoDB used in the cluster. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`, `6.0`, `6.0-enterprise`. */ version: string; /** * MongoDB feature compatibility version. See usage details in [MongoDB documentation](https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/). @@ -1267,6 +1403,7 @@ export interface ConfigSpec { * * `4.2` - persist data compatibility for version 4.2. After setting this option the data will not be compatible with 4.0 or older. * * `4.4` - persist data compatibility for version 4.4. After setting this option the data will not be compatible with 4.2 or older. * * `5.0` - persist data compatibility for version 5.0. After setting this option the data will not be compatible with 4.4 or older. + * * `6.0` - persist data compatibility for version 6.0. After setting this option the data will not be compatible with 5.0 or older. */ featureCompatibilityVersion: string; /** Configuration and resource allocation for a MongoDB 3.6 cluster. */ @@ -1279,14 +1416,20 @@ export interface ConfigSpec { mongodbSpec44?: Mongodbspec44 | undefined; /** Configuration and resource allocation for a MongoDB 5.0 cluster. */ mongodbSpec50?: Mongodbspec50 | undefined; + /** Configuration and resource allocation for a MongoDB 6.0 cluster. */ + mongodbSpec60?: Mongodbspec60 | undefined; /** Configuration and resource allocation for a MongoDB 4.4 Enterprise cluster. 
*/ mongodbSpec44Enterprise?: Mongodbspec44Enterprise | undefined; /** Configuration and resource allocation for a MongoDB 5.0 Enterprise cluster. */ mongodbSpec50Enterprise?: Mongodbspec50Enterprise | undefined; + /** Configuration and resource allocation for a MongoDB 6.0 Enterprise cluster. */ + mongodbSpec60Enterprise?: Mongodbspec60Enterprise | undefined; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Retain period of automatically created backup in days */ backupRetainPeriodDays?: number; + /** Performance Diagnosics configuration */ + performanceDiagnostics?: PerformanceDiagnosticsConfig; /** Access policy to DB */ access?: Access; } @@ -3024,6 +3167,7 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + deletionProtection: false, }; export const RestoreClusterRequest = { @@ -3077,6 +3221,9 @@ export const RestoreClusterRequest = { for (const v of message.securityGroupIds) { writer.uint32(90).string(v!); } + if (message.deletionProtection === true) { + writer.uint32(96).bool(message.deletionProtection); + } return writer; }, @@ -3136,6 +3283,9 @@ export const RestoreClusterRequest = { case 11: message.securityGroupIds.push(reader.string()); break; + case 12: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -3193,6 +3343,11 @@ export const RestoreClusterRequest = { message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => String(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -3234,6 +3389,8 @@ export const RestoreClusterRequest = { } else { obj.securityGroupIds = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -3269,6 +3426,7 @@ export const RestoreClusterRequest = { ) : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? 
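// A hedged sketch of building the restore request with the deletion-protection flag added
// in this patch; the ID values are placeholders, not real resources.
const restoreRequest = RestoreClusterRequest.fromPartial({
  backupId: "<backup-id>",      // placeholder; a real ID comes from a ClusterService.ListBackups request
  name: "restored-cluster",     // must be unique within the folder (see the field comment above)
  folderId: "<folder-id>",      // placeholder
  networkId: "<network-id>",    // placeholder
  deletionProtection: true,     // new field in this patch: inhibits deletion of the cluster
});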
false; return message; }, }; @@ -10844,139 +11002,68 @@ messageTypeRegistry.set( Mongodbspec50Enterprise_MongoInfra ); -const baseConfigSpec: object = { - $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec", - version: "", - featureCompatibilityVersion: "", +const baseMongodbspec60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0", }; -export const ConfigSpec = { - $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec" as const, +export const Mongodbspec60 = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0" as const, encode( - message: ConfigSpec, + message: Mongodbspec60, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.version !== "") { - writer.uint32(10).string(message.version); - } - if (message.featureCompatibilityVersion !== "") { - writer.uint32(42).string(message.featureCompatibilityVersion); - } - if (message.mongodbSpec36 !== undefined) { - Mongodbspec36.encode( - message.mongodbSpec36, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.mongodbSpec40 !== undefined) { - Mongodbspec40.encode( - message.mongodbSpec40, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.mongodbSpec42 !== undefined) { - Mongodbspec42.encode( - message.mongodbSpec42, - writer.uint32(58).fork() - ).ldelim(); - } - if (message.mongodbSpec44 !== undefined) { - Mongodbspec44.encode( - message.mongodbSpec44, - writer.uint32(66).fork() - ).ldelim(); - } - if (message.mongodbSpec50 !== undefined) { - Mongodbspec50.encode( - message.mongodbSpec50, - writer.uint32(82).fork() - ).ldelim(); - } - if (message.mongodbSpec44Enterprise !== undefined) { - Mongodbspec44Enterprise.encode( - message.mongodbSpec44Enterprise, - writer.uint32(90).fork() + if (message.mongod !== undefined) { + Mongodbspec60_Mongod.encode( + message.mongod, + writer.uint32(10).fork() ).ldelim(); } - if (message.mongodbSpec50Enterprise !== undefined) { - Mongodbspec50Enterprise.encode( - message.mongodbSpec50Enterprise, - writer.uint32(98).fork() + if (message.mongocfg !== undefined) { + Mongodbspec60_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() ).ldelim(); } - if (message.backupWindowStart !== undefined) { - TimeOfDay.encode( - message.backupWindowStart, + if (message.mongos !== undefined) { + Mongodbspec60_Mongos.encode( + message.mongos, writer.uint32(26).fork() ).ldelim(); } - if (message.backupRetainPeriodDays !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.backupRetainPeriodDays!, - }, - writer.uint32(74).fork() + if (message.mongoinfra !== undefined) { + Mongodbspec60_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() ).ldelim(); } - if (message.access !== undefined) { - Access.encode(message.access, writer.uint32(50).fork()).ldelim(); - } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodbspec60 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseConfigSpec } as ConfigSpec; + const message = { ...baseMongodbspec60 } as Mongodbspec60; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.version = reader.string(); - break; - case 5: - message.featureCompatibilityVersion = reader.string(); + message.mongod = Mongodbspec60_Mongod.decode(reader, reader.uint32()); break; case 2: - message.mongodbSpec36 = Mongodbspec36.decode(reader, reader.uint32()); - break; - case 4: - message.mongodbSpec40 = Mongodbspec40.decode(reader, reader.uint32()); - break; - case 7: - message.mongodbSpec42 = Mongodbspec42.decode(reader, reader.uint32()); - break; - case 8: - message.mongodbSpec44 = Mongodbspec44.decode(reader, reader.uint32()); - break; - case 10: - message.mongodbSpec50 = Mongodbspec50.decode(reader, reader.uint32()); - break; - case 11: - message.mongodbSpec44Enterprise = Mongodbspec44Enterprise.decode( - reader, - reader.uint32() - ); - break; - case 12: - message.mongodbSpec50Enterprise = Mongodbspec50Enterprise.decode( + message.mongocfg = Mongodbspec60_MongoCfg.decode( reader, reader.uint32() ); break; case 3: - message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); + message.mongos = Mongodbspec60_Mongos.decode(reader, reader.uint32()); break; - case 9: - message.backupRetainPeriodDays = Int64Value.decode( + case 4: + message.mongoinfra = Mongodbspec60_MongoInfra.decode( reader, reader.uint32() - ).value; - break; - case 6: - message.access = Access.decode(reader, reader.uint32()); + ); break; default: reader.skipType(tag & 7); @@ -10986,73 +11073,1315 @@ export const ConfigSpec = { return message; }, - fromJSON(object: any): ConfigSpec { - const message = { ...baseConfigSpec } as ConfigSpec; - message.version = - object.version !== undefined && object.version !== null - ? String(object.version) - : ""; - message.featureCompatibilityVersion = - object.featureCompatibilityVersion !== undefined && - object.featureCompatibilityVersion !== null - ? String(object.featureCompatibilityVersion) - : ""; - message.mongodbSpec36 = - object.mongodbSpec_3_6 !== undefined && object.mongodbSpec_3_6 !== null - ? Mongodbspec36.fromJSON(object.mongodbSpec_3_6) - : undefined; - message.mongodbSpec40 = - object.mongodbSpec_4_0 !== undefined && object.mongodbSpec_4_0 !== null - ? Mongodbspec40.fromJSON(object.mongodbSpec_4_0) - : undefined; - message.mongodbSpec42 = - object.mongodbSpec_4_2 !== undefined && object.mongodbSpec_4_2 !== null - ? Mongodbspec42.fromJSON(object.mongodbSpec_4_2) - : undefined; - message.mongodbSpec44 = - object.mongodbSpec_4_4 !== undefined && object.mongodbSpec_4_4 !== null - ? Mongodbspec44.fromJSON(object.mongodbSpec_4_4) - : undefined; - message.mongodbSpec50 = - object.mongodbSpec_5_0 !== undefined && object.mongodbSpec_5_0 !== null - ? Mongodbspec50.fromJSON(object.mongodbSpec_5_0) - : undefined; - message.mongodbSpec44Enterprise = - object.mongodbSpec_4_4_enterprise !== undefined && - object.mongodbSpec_4_4_enterprise !== null - ? Mongodbspec44Enterprise.fromJSON(object.mongodbSpec_4_4_enterprise) - : undefined; - message.mongodbSpec50Enterprise = - object.mongodbSpec_5_0_enterprise !== undefined && - object.mongodbSpec_5_0_enterprise !== null - ? Mongodbspec50Enterprise.fromJSON(object.mongodbSpec_5_0_enterprise) + fromJSON(object: any): Mongodbspec60 { + const message = { ...baseMongodbspec60 } as Mongodbspec60; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? 
Mongodbspec60_Mongod.fromJSON(object.mongod) : undefined; - message.backupWindowStart = - object.backupWindowStart !== undefined && - object.backupWindowStart !== null - ? TimeOfDay.fromJSON(object.backupWindowStart) + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec60_MongoCfg.fromJSON(object.mongocfg) : undefined; - message.backupRetainPeriodDays = - object.backupRetainPeriodDays !== undefined && - object.backupRetainPeriodDays !== null - ? Number(object.backupRetainPeriodDays) + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec60_Mongos.fromJSON(object.mongos) : undefined; - message.access = - object.access !== undefined && object.access !== null - ? Access.fromJSON(object.access) + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec60_MongoInfra.fromJSON(object.mongoinfra) : undefined; return message; }, - toJSON(message: ConfigSpec): unknown { + toJSON(message: Mongodbspec60): unknown { const obj: any = {}; - message.version !== undefined && (obj.version = message.version); - message.featureCompatibilityVersion !== undefined && - (obj.featureCompatibilityVersion = message.featureCompatibilityVersion); - message.mongodbSpec36 !== undefined && - (obj.mongodbSpec_3_6 = message.mongodbSpec36 - ? Mongodbspec36.toJSON(message.mongodbSpec36) - : undefined); + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodbspec60_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodbspec60_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodbspec60_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodbspec60_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60 { + const message = { ...baseMongodbspec60 } as Mongodbspec60; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec60_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec60_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec60_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec60_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec60.$type, Mongodbspec60); + +const baseMongodbspec60_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.Mongod", +}; + +export const Mongodbspec60_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.Mongod" as const, + + encode( + message: Mongodbspec60_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfig60.encode(message.config, writer.uint32(10).fork()).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60_Mongod { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec60_Mongod } as Mongodbspec60_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfig60.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60_Mongod { + const message = { ...baseMongodbspec60_Mongod } as Mongodbspec60_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig60.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfig60.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60_Mongod { + const message = { ...baseMongodbspec60_Mongod } as Mongodbspec60_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig60.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec60_Mongod.$type, Mongodbspec60_Mongod); + +const baseMongodbspec60_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.MongoCfg", +}; + +export const Mongodbspec60_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.MongoCfg" as const, + + encode( + message: Mongodbspec60_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfig60.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec60_MongoCfg } as Mongodbspec60_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfig60.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60_MongoCfg { + const message = { ...baseMongodbspec60_MongoCfg } as Mongodbspec60_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig60.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfig60.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60_MongoCfg { + const message = { ...baseMongodbspec60_MongoCfg } as Mongodbspec60_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig60.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec60_MongoCfg.$type, Mongodbspec60_MongoCfg); + +const baseMongodbspec60_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.Mongos", +}; + +export const Mongodbspec60_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.Mongos" as const, + + encode( + message: Mongodbspec60_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfig60.encode(message.config, writer.uint32(10).fork()).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec60_Mongos } as Mongodbspec60_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfig60.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60_Mongos { + const message = { ...baseMongodbspec60_Mongos } as Mongodbspec60_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig60.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfig60.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60_Mongos { + const message = { ...baseMongodbspec60_Mongos } as Mongodbspec60_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig60.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec60_Mongos.$type, Mongodbspec60_Mongos); + +const baseMongodbspec60_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.MongoInfra", +}; + +export const Mongodbspec60_MongoInfra = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0.MongoInfra" as const, + + encode( + message: Mongodbspec60_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfig60.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfig60.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec60_MongoInfra, + } as Mongodbspec60_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfig60.decode(reader, reader.uint32()); + break; + case 2: + message.configMongocfg = Mongocfgconfig60.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60_MongoInfra { + const message = { + ...baseMongodbspec60_MongoInfra, + } as Mongodbspec60_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig60.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig60.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfig60.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfig60.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60_MongoInfra { + const message = { + ...baseMongodbspec60_MongoInfra, + } as Mongodbspec60_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig60.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig60.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec60_MongoInfra.$type, + Mongodbspec60_MongoInfra +); + +const baseMongodbspec60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise", +}; + +export const Mongodbspec60Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise" as const, + + encode( + message: Mongodbspec60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodbspec60Enterprise_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.mongocfg !== undefined) { + Mongodbspec60Enterprise_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodbspec60Enterprise_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodbspec60Enterprise_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec60Enterprise, + } as Mongodbspec60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodbspec60Enterprise_Mongod.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.mongocfg = Mongodbspec60Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodbspec60Enterprise_Mongos.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.mongoinfra = Mongodbspec60Enterprise_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60Enterprise { + const message = { + ...baseMongodbspec60Enterprise, + } as Mongodbspec60Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec60Enterprise_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec60Enterprise_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec60Enterprise_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec60Enterprise_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60Enterprise): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodbspec60Enterprise_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodbspec60Enterprise_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodbspec60Enterprise_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? 
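// The messageTypeRegistry.set(...) calls register each generated codec under its $type URL.
// Assuming the registry is the Map exported by the SDK's typeRegistry module (which the set()
// calls above suggest), a codec can be looked up dynamically by its $type:
const codec = messageTypeRegistry.get(Mongodbspec60.$type); // undefined if the type is not registered
if (codec) {
  console.log(`found generated codec for ${Mongodbspec60.$type}`);
}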
Mongodbspec60Enterprise_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60Enterprise { + const message = { + ...baseMongodbspec60Enterprise, + } as Mongodbspec60Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec60Enterprise_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec60Enterprise_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec60Enterprise_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec60Enterprise_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec60Enterprise.$type, Mongodbspec60Enterprise); + +const baseMongodbspec60Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.Mongod", +}; + +export const Mongodbspec60Enterprise_Mongod = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.Mongod" as const, + + encode( + message: Mongodbspec60Enterprise_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfig60Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60Enterprise_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec60Enterprise_Mongod, + } as Mongodbspec60Enterprise_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60Enterprise_Mongod { + const message = { + ...baseMongodbspec60Enterprise_Mongod, + } as Mongodbspec60Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig60Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60Enterprise_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfig60Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60Enterprise_Mongod { + const message = { + ...baseMongodbspec60Enterprise_Mongod, + } as Mongodbspec60Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? 
Mongodconfig60Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec60Enterprise_Mongod.$type, + Mongodbspec60Enterprise_Mongod +); + +const baseMongodbspec60Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.MongoCfg", +}; + +export const Mongodbspec60Enterprise_MongoCfg = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.MongoCfg" as const, + + encode( + message: Mongodbspec60Enterprise_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfig60Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60Enterprise_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec60Enterprise_MongoCfg, + } as Mongodbspec60Enterprise_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60Enterprise_MongoCfg { + const message = { + ...baseMongodbspec60Enterprise_MongoCfg, + } as Mongodbspec60Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig60Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60Enterprise_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfig60Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec60Enterprise_MongoCfg { + const message = { + ...baseMongodbspec60Enterprise_MongoCfg, + } as Mongodbspec60Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig60Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec60Enterprise_MongoCfg.$type, + Mongodbspec60Enterprise_MongoCfg +); + +const baseMongodbspec60Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.Mongos", +}; + +export const Mongodbspec60Enterprise_Mongos = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.Mongos" as const, + + encode( + message: Mongodbspec60Enterprise_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfig60Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60Enterprise_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec60Enterprise_Mongos, + } as Mongodbspec60Enterprise_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60Enterprise_Mongos { + const message = { + ...baseMongodbspec60Enterprise_Mongos, + } as Mongodbspec60Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig60Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60Enterprise_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfig60Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec60Enterprise_Mongos { + const message = { + ...baseMongodbspec60Enterprise_Mongos, + } as Mongodbspec60Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig60Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec60Enterprise_Mongos.$type, + Mongodbspec60Enterprise_Mongos +); + +const baseMongodbspec60Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.MongoInfra", +}; + +export const Mongodbspec60Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec6_0_enterprise.MongoInfra" as const, + + encode( + message: Mongodbspec60Enterprise_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfig60Enterprise.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfig60Enterprise.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec60Enterprise_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec60Enterprise_MongoInfra, + } as Mongodbspec60Enterprise_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec60Enterprise_MongoInfra { + const message = { + ...baseMongodbspec60Enterprise_MongoInfra, + } as Mongodbspec60Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig60Enterprise.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig60Enterprise.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec60Enterprise_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfig60Enterprise.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfig60Enterprise.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec60Enterprise_MongoInfra { + const message = { + ...baseMongodbspec60Enterprise_MongoInfra, + } as Mongodbspec60Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? 
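// --- Editor's illustrative sketch (not part of the generated patch) ---------
// The messages above add MongoDB 6.0 Enterprise host specs (mongod, mongocfg,
// mongos, mongoinfra). A minimal round trip through the generated helpers could
// look like the following, using only the fromJSON/encode/decode/toJSON methods
// defined above in this file. The Resources field names (resourcePresetId,
// diskSize, diskTypeId) are assumptions, not confirmed by this hunk.
const enterpriseSpec = Mongodbspec60Enterprise.fromJSON({
  mongoinfra: {
    resources: {
      resourcePresetId: "s2.micro", // assumed Resources fields
      diskSize: 10737418240,
      diskTypeId: "network-ssd",
    },
  },
});

// encode(...).finish() yields protobuf bytes; decode parses them back.
const bytes = Mongodbspec60Enterprise.encode(enterpriseSpec).finish();
const roundTripped = Mongodbspec60Enterprise.decode(bytes);
console.log(Mongodbspec60Enterprise.toJSON(roundTripped));
// ---------------------------------------------------------------------------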
Mongosconfig60Enterprise.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig60Enterprise.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec60Enterprise_MongoInfra.$type, + Mongodbspec60Enterprise_MongoInfra +); + +const baseConfigSpec: object = { + $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec", + version: "", + featureCompatibilityVersion: "", +}; + +export const ConfigSpec = { + $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec" as const, + + encode( + message: ConfigSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.version !== "") { + writer.uint32(10).string(message.version); + } + if (message.featureCompatibilityVersion !== "") { + writer.uint32(42).string(message.featureCompatibilityVersion); + } + if (message.mongodbSpec36 !== undefined) { + Mongodbspec36.encode( + message.mongodbSpec36, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongodbSpec40 !== undefined) { + Mongodbspec40.encode( + message.mongodbSpec40, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.mongodbSpec42 !== undefined) { + Mongodbspec42.encode( + message.mongodbSpec42, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.mongodbSpec44 !== undefined) { + Mongodbspec44.encode( + message.mongodbSpec44, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.mongodbSpec50 !== undefined) { + Mongodbspec50.encode( + message.mongodbSpec50, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.mongodbSpec60 !== undefined) { + Mongodbspec60.encode( + message.mongodbSpec60, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.mongodbSpec44Enterprise !== undefined) { + Mongodbspec44Enterprise.encode( + message.mongodbSpec44Enterprise, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.mongodbSpec50Enterprise !== undefined) { + Mongodbspec50Enterprise.encode( + message.mongodbSpec50Enterprise, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.mongodbSpec60Enterprise !== undefined) { + Mongodbspec60Enterprise.encode( + message.mongodbSpec60Enterprise, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.backupWindowStart !== undefined) { + TimeOfDay.encode( + message.backupWindowStart, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.backupRetainPeriodDays !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backupRetainPeriodDays!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.performanceDiagnostics !== undefined) { + PerformanceDiagnosticsConfig.encode( + message.performanceDiagnostics, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(50).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConfigSpec } as ConfigSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.string(); + break; + case 5: + message.featureCompatibilityVersion = reader.string(); + break; + case 2: + message.mongodbSpec36 = Mongodbspec36.decode(reader, reader.uint32()); + break; + case 4: + message.mongodbSpec40 = Mongodbspec40.decode(reader, reader.uint32()); + break; + case 7: + message.mongodbSpec42 = Mongodbspec42.decode(reader, reader.uint32()); + break; + case 8: + message.mongodbSpec44 = Mongodbspec44.decode(reader, reader.uint32()); + break; + case 10: + message.mongodbSpec50 = Mongodbspec50.decode(reader, reader.uint32()); + break; + case 14: + message.mongodbSpec60 = Mongodbspec60.decode(reader, reader.uint32()); + break; + case 11: + message.mongodbSpec44Enterprise = Mongodbspec44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.mongodbSpec50Enterprise = Mongodbspec50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 15: + message.mongodbSpec60Enterprise = Mongodbspec60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); + break; + case 9: + message.backupRetainPeriodDays = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.performanceDiagnostics = PerformanceDiagnosticsConfig.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.access = Access.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConfigSpec { + const message = { ...baseConfigSpec } as ConfigSpec; + message.version = + object.version !== undefined && object.version !== null + ? String(object.version) + : ""; + message.featureCompatibilityVersion = + object.featureCompatibilityVersion !== undefined && + object.featureCompatibilityVersion !== null + ? String(object.featureCompatibilityVersion) + : ""; + message.mongodbSpec36 = + object.mongodbSpec_3_6 !== undefined && object.mongodbSpec_3_6 !== null + ? Mongodbspec36.fromJSON(object.mongodbSpec_3_6) + : undefined; + message.mongodbSpec40 = + object.mongodbSpec_4_0 !== undefined && object.mongodbSpec_4_0 !== null + ? Mongodbspec40.fromJSON(object.mongodbSpec_4_0) + : undefined; + message.mongodbSpec42 = + object.mongodbSpec_4_2 !== undefined && object.mongodbSpec_4_2 !== null + ? Mongodbspec42.fromJSON(object.mongodbSpec_4_2) + : undefined; + message.mongodbSpec44 = + object.mongodbSpec_4_4 !== undefined && object.mongodbSpec_4_4 !== null + ? Mongodbspec44.fromJSON(object.mongodbSpec_4_4) + : undefined; + message.mongodbSpec50 = + object.mongodbSpec_5_0 !== undefined && object.mongodbSpec_5_0 !== null + ? Mongodbspec50.fromJSON(object.mongodbSpec_5_0) + : undefined; + message.mongodbSpec60 = + object.mongodbSpec_6_0 !== undefined && object.mongodbSpec_6_0 !== null + ? Mongodbspec60.fromJSON(object.mongodbSpec_6_0) + : undefined; + message.mongodbSpec44Enterprise = + object.mongodbSpec_4_4_enterprise !== undefined && + object.mongodbSpec_4_4_enterprise !== null + ? Mongodbspec44Enterprise.fromJSON(object.mongodbSpec_4_4_enterprise) + : undefined; + message.mongodbSpec50Enterprise = + object.mongodbSpec_5_0_enterprise !== undefined && + object.mongodbSpec_5_0_enterprise !== null + ? 
Mongodbspec50Enterprise.fromJSON(object.mongodbSpec_5_0_enterprise) + : undefined; + message.mongodbSpec60Enterprise = + object.mongodbSpec_6_0_enterprise !== undefined && + object.mongodbSpec_6_0_enterprise !== null + ? Mongodbspec60Enterprise.fromJSON(object.mongodbSpec_6_0_enterprise) + : undefined; + message.backupWindowStart = + object.backupWindowStart !== undefined && + object.backupWindowStart !== null + ? TimeOfDay.fromJSON(object.backupWindowStart) + : undefined; + message.backupRetainPeriodDays = + object.backupRetainPeriodDays !== undefined && + object.backupRetainPeriodDays !== null + ? Number(object.backupRetainPeriodDays) + : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnosticsConfig.fromJSON(object.performanceDiagnostics) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; + return message; + }, + + toJSON(message: ConfigSpec): unknown { + const obj: any = {}; + message.version !== undefined && (obj.version = message.version); + message.featureCompatibilityVersion !== undefined && + (obj.featureCompatibilityVersion = message.featureCompatibilityVersion); + message.mongodbSpec36 !== undefined && + (obj.mongodbSpec_3_6 = message.mongodbSpec36 + ? Mongodbspec36.toJSON(message.mongodbSpec36) + : undefined); message.mongodbSpec40 !== undefined && (obj.mongodbSpec_4_0 = message.mongodbSpec40 ? Mongodbspec40.toJSON(message.mongodbSpec40) @@ -11069,6 +12398,10 @@ export const ConfigSpec = { (obj.mongodbSpec_5_0 = message.mongodbSpec50 ? Mongodbspec50.toJSON(message.mongodbSpec50) : undefined); + message.mongodbSpec60 !== undefined && + (obj.mongodbSpec_6_0 = message.mongodbSpec60 + ? Mongodbspec60.toJSON(message.mongodbSpec60) + : undefined); message.mongodbSpec44Enterprise !== undefined && (obj.mongodbSpec_4_4_enterprise = message.mongodbSpec44Enterprise ? Mongodbspec44Enterprise.toJSON(message.mongodbSpec44Enterprise) @@ -11077,12 +12410,20 @@ export const ConfigSpec = { (obj.mongodbSpec_5_0_enterprise = message.mongodbSpec50Enterprise ? Mongodbspec50Enterprise.toJSON(message.mongodbSpec50Enterprise) : undefined); + message.mongodbSpec60Enterprise !== undefined && + (obj.mongodbSpec_6_0_enterprise = message.mongodbSpec60Enterprise + ? Mongodbspec60Enterprise.toJSON(message.mongodbSpec60Enterprise) + : undefined); message.backupWindowStart !== undefined && (obj.backupWindowStart = message.backupWindowStart ? TimeOfDay.toJSON(message.backupWindowStart) : undefined); message.backupRetainPeriodDays !== undefined && (obj.backupRetainPeriodDays = message.backupRetainPeriodDays); + message.performanceDiagnostics !== undefined && + (obj.performanceDiagnostics = message.performanceDiagnostics + ? PerformanceDiagnosticsConfig.toJSON(message.performanceDiagnostics) + : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); return obj; @@ -11115,6 +12456,10 @@ export const ConfigSpec = { object.mongodbSpec50 !== undefined && object.mongodbSpec50 !== null ? Mongodbspec50.fromPartial(object.mongodbSpec50) : undefined; + message.mongodbSpec60 = + object.mongodbSpec60 !== undefined && object.mongodbSpec60 !== null + ? 
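// --- Editor's illustrative sketch (not part of the generated patch) ---------
// ConfigSpec.fromJSON above now understands the mongodbSpec_6_0 and
// mongodbSpec_6_0_enterprise keys. A MongoDB 6.0 cluster config could be
// assembled like this; the nested mongod/resources shape mirrors the 6.0 spec
// messages earlier in this patch, and the Resources field names are assumptions.
const configSpec = ConfigSpec.fromJSON({
  version: "6.0",
  featureCompatibilityVersion: "6.0",
  mongodbSpec_6_0: {
    mongod: {
      resources: {
        resourcePresetId: "s2.micro", // assumed Resources fields
        diskSize: 10737418240,
        diskTypeId: "network-ssd",
      },
    },
  },
  backupRetainPeriodDays: 7,
});
// ConfigSpec.toJSON(configSpec) emits the same mongodbSpec_6_0 key added above.
// ---------------------------------------------------------------------------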
Mongodbspec60.fromPartial(object.mongodbSpec60) + : undefined; message.mongodbSpec44Enterprise = object.mongodbSpec44Enterprise !== undefined && object.mongodbSpec44Enterprise !== null @@ -11125,12 +12470,24 @@ export const ConfigSpec = { object.mongodbSpec50Enterprise !== null ? Mongodbspec50Enterprise.fromPartial(object.mongodbSpec50Enterprise) : undefined; + message.mongodbSpec60Enterprise = + object.mongodbSpec60Enterprise !== undefined && + object.mongodbSpec60Enterprise !== null + ? Mongodbspec60Enterprise.fromPartial(object.mongodbSpec60Enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null ? TimeOfDay.fromPartial(object.backupWindowStart) : undefined; message.backupRetainPeriodDays = object.backupRetainPeriodDays ?? undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnosticsConfig.fromPartial( + object.performanceDiagnostics + ) + : undefined; message.access = object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) @@ -11283,7 +12640,10 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Retrieves logs for the specified MongoDB cluster. */ + /** + * Retrieves logs for the specified MongoDB cluster. + * See the [Logs](/yandex-mdb-guide/concepts/logs.html) section in the developers guide for detailed logs description. + */ listLogs: { path: "/yandex.cloud.mdb.mongodb.v1.ClusterService/ListLogs", requestStream: false, @@ -11377,7 +12737,8 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => Operation.decode(value), }, /** - * Enables sharding for the cluster: creates 3 mongoinfra (or 3 mongocfg and 2 mongos) hosts + * Enables sharding for the cluster: + * creates 3 mongoinfra (or 3 mongocfg and 2 mongos) hosts * that would support adding and using shards in the cluster. */ enableSharding: { @@ -11514,7 +12875,10 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { RescheduleMaintenanceRequest, Operation >; - /** Retrieves logs for the specified MongoDB cluster. */ + /** + * Retrieves logs for the specified MongoDB cluster. + * See the [Logs](/yandex-mdb-guide/concepts/logs.html) section in the developers guide for detailed logs description. + */ listLogs: handleUnaryCall; /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ streamLogs: handleServerStreamingCall< @@ -11538,7 +12902,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { /** Deletes the specified hosts for a cluster. */ deleteHosts: handleUnaryCall; /** - * Enables sharding for the cluster: creates 3 mongoinfra (or 3 mongocfg and 2 mongos) hosts + * Enables sharding for the cluster: + * creates 3 mongoinfra (or 3 mongocfg and 2 mongos) hosts * that would support adding and using shards in the cluster. */ enableSharding: handleUnaryCall; @@ -11754,7 +13119,10 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Retrieves logs for the specified MongoDB cluster. */ + /** + * Retrieves logs for the specified MongoDB cluster. + * See the [Logs](/yandex-mdb-guide/concepts/logs.html) section in the developers guide for detailed logs description. 
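// --- Editor's illustrative sketch (not part of the generated patch) ---------
// The ClusterService definitions above gained expanded doc comments for ListLogs
// and EnableSharding. Calling ListLogs through the generated client interface
// could look like this; how the ClusterServiceClient instance is obtained (for
// example via the SDK session / service-client factory) and the clusterId field
// of ListClusterLogsRequest are assumptions, not shown in this hunk.
declare const client: ClusterServiceClient; // obtained elsewhere (assumed)

const request = ListClusterLogsRequest.fromPartial({ clusterId: "<cluster id>" }); // assumed field
client.listLogs(request, (err, response) => {
  if (err) throw err;
  // Log records for the cluster; see the Logs section of the developers guide.
  console.log(response);
});
// ---------------------------------------------------------------------------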
+ */ listLogs( request: ListClusterLogsRequest, callback: ( @@ -11897,7 +13265,8 @@ export interface ClusterServiceClient extends Client { callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; /** - * Enables sharding for the cluster: creates 3 mongoinfra (or 3 mongocfg and 2 mongos) hosts + * Enables sharding for the cluster: + * creates 3 mongoinfra (or 3 mongocfg and 2 mongos) hosts * that would support adding and using shards in the cluster. */ enableSharding( diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_2.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_2.ts index d3ab509f..1c89e132 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_2.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_2.ts @@ -60,6 +60,8 @@ export enum Mongodconfig42_Storage_WiredTiger_CollectionConfig_Compressor { SNAPPY = 2, /** ZLIB - The [zlib](https://docs.mongodb.com/v4.2/reference/glossary/#term-zlib) compression. */ ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v4.2/reference/glossary/#term-zstd) compression. */ + ZSTD = 4, UNRECOGNIZED = -1, } @@ -79,6 +81,9 @@ export function mongodconfig42_Storage_WiredTiger_CollectionConfig_CompressorFro case 3: case "ZLIB": return Mongodconfig42_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig42_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; case -1: case "UNRECOGNIZED": default: @@ -98,6 +103,8 @@ export function mongodconfig42_Storage_WiredTiger_CollectionConfig_CompressorToJ return "SNAPPY"; case Mongodconfig42_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: return "ZLIB"; + case Mongodconfig42_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4.ts index 46ff286c..77e1ff4b 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4.ts @@ -60,6 +60,8 @@ export enum Mongodconfig44_Storage_WiredTiger_CollectionConfig_Compressor { SNAPPY = 2, /** ZLIB - The [zlib](https://docs.mongodb.com/v4.4/reference/glossary/#term-zlib) compression. */ ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v4.4/reference/glossary/#term-zstd) compression. 
*/ + ZSTD = 4, UNRECOGNIZED = -1, } @@ -79,6 +81,9 @@ export function mongodconfig44_Storage_WiredTiger_CollectionConfig_CompressorFro case 3: case "ZLIB": return Mongodconfig44_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig44_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; case -1: case "UNRECOGNIZED": default: @@ -98,6 +103,8 @@ export function mongodconfig44_Storage_WiredTiger_CollectionConfig_CompressorToJ return "SNAPPY"; case Mongodconfig44_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: return "ZLIB"; + case Mongodconfig44_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts index 7ccec218..f20d7aa5 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts @@ -67,6 +67,8 @@ export enum Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compres SNAPPY = 2, /** ZLIB - The [zlib](https://docs.mongodb.com/v4.4/reference/glossary/#term-zlib) compression. */ ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v4.4/reference/glossary/#term-zstd) compression. */ + ZSTD = 4, UNRECOGNIZED = -1, } @@ -86,6 +88,9 @@ export function mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Com case 3: case "ZLIB": return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; case -1: case "UNRECOGNIZED": default: @@ -105,6 +110,8 @@ export function mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Com return "SNAPPY"; case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: return "ZLIB"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0.ts index 7ec0849b..5f25e071 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0.ts @@ -60,6 +60,8 @@ export enum Mongodconfig50_Storage_WiredTiger_CollectionConfig_Compressor { SNAPPY = 2, /** ZLIB - The [zlib](https://docs.mongodb.com/v5.0/reference/glossary/#term-zlib) compression. */ ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v5.0/reference/glossary/#term-zstd) compression. 
*/ + ZSTD = 4, UNRECOGNIZED = -1, } @@ -79,6 +81,9 @@ export function mongodconfig50_Storage_WiredTiger_CollectionConfig_CompressorFro case 3: case "ZLIB": return Mongodconfig50_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig50_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; case -1: case "UNRECOGNIZED": default: @@ -98,6 +103,8 @@ export function mongodconfig50_Storage_WiredTiger_CollectionConfig_CompressorToJ return "SNAPPY"; case Mongodconfig50_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: return "ZLIB"; + case Mongodconfig50_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts index 770cc57c..7a850308 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts @@ -67,6 +67,8 @@ export enum Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compres SNAPPY = 2, /** ZLIB - The [zlib](https://docs.mongodb.com/v5.0/reference/glossary/#term-zlib) compression. */ ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v5.0/reference/glossary/#term-zstd) compression. */ + ZSTD = 4, UNRECOGNIZED = -1, } @@ -86,6 +88,9 @@ export function mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Com case 3: case "ZLIB": return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; case -1: case "UNRECOGNIZED": default: @@ -105,6 +110,8 @@ export function mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Com return "SNAPPY"; case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: return "ZLIB"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; default: return "UNKNOWN"; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0.ts new file mode 100644 index 00000000..53e0a283 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0.ts @@ -0,0 +1,2215 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.mongodb.v1.config"; + +/** + * Configuration of a mongod daemon. Supported options are a limited subset of all + * options described in [MongoDB documentation](https://docs.mongodb.com/v6.0/reference/configuration-options/). + */ +export interface Mongodconfig60 { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0"; + /** `storage` section of mongod configuration. */ + storage?: Mongodconfig60_Storage; + /** `operationProfiling` section of mongod configuration. */ + operationProfiling?: Mongodconfig60_OperationProfiling; + /** `net` section of mongod configuration. */ + net?: Mongodconfig60_Network; +} + +export interface Mongodconfig60_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage"; + /** Configuration of the WiredTiger storage engine. 
*/ + wiredTiger?: Mongodconfig60_Storage_WiredTiger; + /** Configuration of the MongoDB [journal](https://docs.mongodb.com/v6.0/reference/glossary/#term-journal). */ + journal?: Mongodconfig60_Storage_Journal; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongodconfig60_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongodconfig60_Storage_WiredTiger_EngineConfig; + /** Collection configuration for WiredTiger. */ + collectionConfig?: Mongodconfig60_Storage_WiredTiger_CollectionConfig; +} + +export interface Mongodconfig60_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongodconfig60_Storage_WiredTiger_CollectionConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.CollectionConfig"; + /** Default type of compression to use for collection data. */ + blockCompressor: Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor; +} + +export enum Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor { + COMPRESSOR_UNSPECIFIED = 0, + /** NONE - No compression. */ + NONE = 1, + /** SNAPPY - The [Snappy](https://docs.mongodb.com/v6.0/reference/glossary/#term-snappy) compression. */ + SNAPPY = 2, + /** ZLIB - The [zlib](https://docs.mongodb.com/v6.0/reference/glossary/#term-zlib) compression. */ + ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v6.0/reference/glossary/#term-zstd) compression. */ + ZSTD = 4, + UNRECOGNIZED = -1, +} + +export function mongodconfig60_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object: any +): Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor { + switch (object) { + case 0: + case "COMPRESSOR_UNSPECIFIED": + return Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED; + case 1: + case "NONE": + return Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.NONE; + case 2: + case "SNAPPY": + return Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY; + case 3: + case "ZLIB": + return Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.UNRECOGNIZED; + } +} + +export function mongodconfig60_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + object: Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor +): string { + switch (object) { + case Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED: + return "COMPRESSOR_UNSPECIFIED"; + case Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.NONE: + return "NONE"; + case Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY: + return "SNAPPY"; + case Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: + return "ZLIB"; + case Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig60_Storage_Journal { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.Journal"; + /** + * Commit interval between journal 
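// --- Editor's illustrative sketch (not part of the generated patch) ---------
// The block compressor for MongoDB 6.0 collections, including the new ZSTD
// value, is selected through the enum above; the generated FromJSON/ToJSON
// helpers translate between the wire value and its string name.
const collectionConfig: Mongodconfig60_Storage_WiredTiger_CollectionConfig = {
  $type:
    "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.CollectionConfig",
  blockCompressor:
    Mongodconfig60_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD,
};

const fromName =
  mongodconfig60_Storage_WiredTiger_CollectionConfig_CompressorFromJSON("ZSTD"); // === Compressor.ZSTD (4)
const backToName =
  mongodconfig60_Storage_WiredTiger_CollectionConfig_CompressorToJSON(fromName); // "ZSTD"
// ---------------------------------------------------------------------------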
operations, in milliseconds. + * Default: 100. + */ + commitInterval?: number; +} + +export interface Mongodconfig60_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongodconfig60_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. + */ + slowOpThreshold?: number; +} + +export enum Mongodconfig60_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig60_OperationProfiling_ModeFromJSON( + object: any +): Mongodconfig60_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongodconfig60_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongodconfig60_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongodconfig60_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongodconfig60_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig60_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongodconfig60_OperationProfiling_ModeToJSON( + object: Mongodconfig60_OperationProfiling_Mode +): string { + switch (object) { + case Mongodconfig60_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongodconfig60_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongodconfig60_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongodconfig60_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig60_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Network"; + /** The maximum number of simultaneous connections that mongod will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongocfgconfig60 { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0"; + /** `storage` section of mongocfg configuration. */ + storage?: Mongocfgconfig60_Storage; + /** `operationProfiling` section of mongocfg configuration. */ + operationProfiling?: Mongocfgconfig60_OperationProfiling; + /** `net` section of mongocfg configuration. */ + net?: Mongocfgconfig60_Network; +} + +export interface Mongocfgconfig60_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongocfgconfig60_Storage_WiredTiger; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongocfgconfig60_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongocfgconfig60_Storage_WiredTiger_EngineConfig; +} + +export interface Mongocfgconfig60_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. 
*/ + cacheSizeGb?: number; +} + +export interface Mongocfgconfig60_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongocfgconfig60_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. For details see [MongoDB documentation](https://docs.mongodb.com/v6.0/reference/configuration-options/#operationProfiling.slowOpThresholdMs). + */ + slowOpThreshold?: number; +} + +export enum Mongocfgconfig60_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongocfgconfig60_OperationProfiling_ModeFromJSON( + object: any +): Mongocfgconfig60_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongocfgconfig60_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongocfgconfig60_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongocfgconfig60_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongocfgconfig60_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongocfgconfig60_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongocfgconfig60_OperationProfiling_ModeToJSON( + object: Mongocfgconfig60_OperationProfiling_Mode +): string { + switch (object) { + case Mongocfgconfig60_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongocfgconfig60_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongocfgconfig60_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongocfgconfig60_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongocfgconfig60_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Network"; + /** The maximum number of simultaneous connections that mongocfg will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongosconfig60 { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0"; + /** Network settings for mongos. */ + net?: Mongosconfig60_Network; +} + +export interface Mongosconfig60_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0.Network"; + /** The maximum number of simultaneous connections that mongos will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfigset60 { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet6_0"; + /** + * Effective mongod settings for a MongoDB 6.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongodconfig60; + /** User-defined mongod settings for a MongoDB 6.0 cluster. */ + userConfig?: Mongodconfig60; + /** Default mongod configuration for a MongoDB 6.0 cluster. 
*/ + defaultConfig?: Mongodconfig60; +} + +export interface Mongocfgconfigset60 { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet6_0"; + /** + * Effective mongocfg settings for a MongoDB 6.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongocfgconfig60; + /** User-defined mongocfg settings for a MongoDB 6.0 cluster. */ + userConfig?: Mongocfgconfig60; + /** Default mongocfg configuration for a MongoDB 6.0 cluster. */ + defaultConfig?: Mongocfgconfig60; +} + +export interface Mongosconfigset60 { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet6_0"; + /** + * Effective mongos settings for a MongoDB 6.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongosconfig60; + /** User-defined mongos settings for a MongoDB 6.0 cluster. */ + userConfig?: Mongosconfig60; + /** Default mongos configuration for a MongoDB 6.0 cluster. */ + defaultConfig?: Mongosconfig60; +} + +const baseMongodconfig60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0", +}; + +export const Mongodconfig60 = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0" as const, + + encode( + message: Mongodconfig60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongodconfig60_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongodconfig60_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongodconfig60_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodconfig60 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodconfig60 } as Mongodconfig60; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongodconfig60_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = Mongodconfig60_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongodconfig60_Network.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60 { + const message = { ...baseMongodconfig60 } as Mongodconfig60; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig60_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig60_OperationProfiling.fromJSON(object.operationProfiling) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig60_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongodconfig60_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? 
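// --- Editor's illustrative sketch (not part of the generated patch) ---------
// Mongodconfig60.fromJSON above accepts the same storage / operationProfiling /
// net sections described by the interfaces earlier in this file, so a full
// mongod 6.0 configuration can be assembled from plain JSON and converted back
// with toJSON. The concrete values below are arbitrary examples.
const mongodConfig = Mongodconfig60.fromJSON({
  storage: {
    wiredTiger: { collectionConfig: { blockCompressor: "ZSTD" } },
    journal: { commitInterval: 100 },
  },
  operationProfiling: { mode: "SLOW_OP", slowOpThreshold: 300 },
  net: { maxIncomingConnections: 1024 },
});
console.log(Mongodconfig60.toJSON(mongodConfig));
// ---------------------------------------------------------------------------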
Mongodconfig60_OperationProfiling.toJSON(message.operationProfiling) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongodconfig60_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig60 { + const message = { ...baseMongodconfig60 } as Mongodconfig60; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig60_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig60_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig60_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodconfig60.$type, Mongodconfig60); + +const baseMongodconfig60_Storage: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage", +}; + +export const Mongodconfig60_Storage = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage" as const, + + encode( + message: Mongodconfig60_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongodconfig60_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.journal !== undefined) { + Mongodconfig60_Storage_Journal.encode( + message.journal, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodconfig60_Storage } as Mongodconfig60_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = Mongodconfig60_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.journal = Mongodconfig60_Storage_Journal.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_Storage { + const message = { ...baseMongodconfig60_Storage } as Mongodconfig60_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig60_Storage_WiredTiger.fromJSON(object.wiredTiger) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig60_Storage_Journal.fromJSON(object.journal) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongodconfig60_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + message.journal !== undefined && + (obj.journal = message.journal + ? Mongodconfig60_Storage_Journal.toJSON(message.journal) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig60_Storage { + const message = { ...baseMongodconfig60_Storage } as Mongodconfig60_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? 
Mongodconfig60_Storage_WiredTiger.fromPartial(object.wiredTiger) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig60_Storage_Journal.fromPartial(object.journal) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodconfig60_Storage.$type, Mongodconfig60_Storage); + +const baseMongodconfig60_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger", +}; + +export const Mongodconfig60_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger" as const, + + encode( + message: Mongodconfig60_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongodconfig60_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.collectionConfig !== undefined) { + Mongodconfig60_Storage_WiredTiger_CollectionConfig.encode( + message.collectionConfig, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60_Storage_WiredTiger, + } as Mongodconfig60_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongodconfig60_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.collectionConfig = + Mongodconfig60_Storage_WiredTiger_CollectionConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_Storage_WiredTiger { + const message = { + ...baseMongodconfig60_Storage_WiredTiger, + } as Mongodconfig60_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig60_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig60_Storage_WiredTiger_CollectionConfig.fromJSON( + object.collectionConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongodconfig60_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + message.collectionConfig !== undefined && + (obj.collectionConfig = message.collectionConfig + ? Mongodconfig60_Storage_WiredTiger_CollectionConfig.toJSON( + message.collectionConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60_Storage_WiredTiger { + const message = { + ...baseMongodconfig60_Storage_WiredTiger, + } as Mongodconfig60_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig60_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? 
Mongodconfig60_Storage_WiredTiger_CollectionConfig.fromPartial( + object.collectionConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60_Storage_WiredTiger.$type, + Mongodconfig60_Storage_WiredTiger +); + +const baseMongodconfig60_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.EngineConfig", +}; + +export const Mongodconfig60_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongodconfig60_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60_Storage_WiredTiger_EngineConfig, + } as Mongodconfig60_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig60_Storage_WiredTiger_EngineConfig, + } as Mongodconfig60_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60_Storage_WiredTiger_EngineConfig): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig60_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig60_Storage_WiredTiger_EngineConfig, + } as Mongodconfig60_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60_Storage_WiredTiger_EngineConfig.$type, + Mongodconfig60_Storage_WiredTiger_EngineConfig +); + +const baseMongodconfig60_Storage_WiredTiger_CollectionConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.CollectionConfig", + blockCompressor: 0, +}; + +export const Mongodconfig60_Storage_WiredTiger_CollectionConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.WiredTiger.CollectionConfig" as const, + + encode( + message: Mongodconfig60_Storage_WiredTiger_CollectionConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.blockCompressor !== 0) { + writer.uint32(8).int32(message.blockCompressor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_Storage_WiredTiger_CollectionConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig60_Storage_WiredTiger_CollectionConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blockCompressor = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig60_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig60_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = + object.blockCompressor !== undefined && object.blockCompressor !== null + ? mongodconfig60_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object.blockCompressor + ) + : 0; + return message; + }, + + toJSON(message: Mongodconfig60_Storage_WiredTiger_CollectionConfig): unknown { + const obj: any = {}; + message.blockCompressor !== undefined && + (obj.blockCompressor = + mongodconfig60_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + message.blockCompressor + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig60_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig60_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig60_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = object.blockCompressor ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60_Storage_WiredTiger_CollectionConfig.$type, + Mongodconfig60_Storage_WiredTiger_CollectionConfig +); + +const baseMongodconfig60_Storage_Journal: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.Journal", +}; + +export const Mongodconfig60_Storage_Journal = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Storage.Journal" as const, + + encode( + message: Mongodconfig60_Storage_Journal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.commitInterval !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.commitInterval! }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_Storage_Journal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60_Storage_Journal, + } as Mongodconfig60_Storage_Journal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.commitInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_Storage_Journal { + const message = { + ...baseMongodconfig60_Storage_Journal, + } as Mongodconfig60_Storage_Journal; + message.commitInterval = + object.commitInterval !== undefined && object.commitInterval !== null + ? 
Number(object.commitInterval) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60_Storage_Journal): unknown { + const obj: any = {}; + message.commitInterval !== undefined && + (obj.commitInterval = message.commitInterval); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig60_Storage_Journal { + const message = { + ...baseMongodconfig60_Storage_Journal, + } as Mongodconfig60_Storage_Journal; + message.commitInterval = object.commitInterval ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60_Storage_Journal.$type, + Mongodconfig60_Storage_Journal +); + +const baseMongodconfig60_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.OperationProfiling", + mode: 0, +}; + +export const Mongodconfig60_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.OperationProfiling" as const, + + encode( + message: Mongodconfig60_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60_OperationProfiling, + } as Mongodconfig60_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_OperationProfiling { + const message = { + ...baseMongodconfig60_OperationProfiling, + } as Mongodconfig60_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? mongodconfig60_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongodconfig60_OperationProfiling_ModeToJSON(message.mode)); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60_OperationProfiling { + const message = { + ...baseMongodconfig60_OperationProfiling, + } as Mongodconfig60_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60_OperationProfiling.$type, + Mongodconfig60_OperationProfiling +); + +const baseMongodconfig60_Network: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Network", +}; + +export const Mongodconfig60_Network = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0.Network" as const, + + encode( + message: Mongodconfig60_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodconfig60_Network } as Mongodconfig60_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60_Network { + const message = { ...baseMongodconfig60_Network } as Mongodconfig60_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig60_Network { + const message = { ...baseMongodconfig60_Network } as Mongodconfig60_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodconfig60_Network.$type, Mongodconfig60_Network); + +const baseMongocfgconfig60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0", +}; + +export const Mongocfgconfig60 = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0" as const, + + encode( + message: Mongocfgconfig60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongocfgconfig60_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongocfgconfig60_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongocfgconfig60_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongocfgconfig60 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongocfgconfig60 } as Mongocfgconfig60; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongocfgconfig60_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongocfgconfig60_OperationProfiling.decode(reader, reader.uint32()); + break; + case 3: + message.net = Mongocfgconfig60_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60 { + const message = { ...baseMongocfgconfig60 } as Mongocfgconfig60; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig60_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig60_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig60_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongocfgconfig60_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongocfgconfig60_OperationProfiling.toJSON(message.operationProfiling) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongocfgconfig60_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig60 { + const message = { ...baseMongocfgconfig60 } as Mongocfgconfig60; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig60_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig60_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig60_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongocfgconfig60.$type, Mongocfgconfig60); + +const baseMongocfgconfig60_Storage: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage", +}; + +export const Mongocfgconfig60_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage" as const, + + encode( + message: Mongocfgconfig60_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongocfgconfig60_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60_Storage, + } as Mongocfgconfig60_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = Mongocfgconfig60_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60_Storage { + const message = { + ...baseMongocfgconfig60_Storage, + } as Mongocfgconfig60_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig60_Storage_WiredTiger.fromJSON(object.wiredTiger) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongocfgconfig60_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig60_Storage { + const message = { + ...baseMongocfgconfig60_Storage, + } as Mongocfgconfig60_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig60_Storage_WiredTiger.fromPartial(object.wiredTiger) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60_Storage.$type, + Mongocfgconfig60_Storage +); + +const baseMongocfgconfig60_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage.WiredTiger", +}; + +export const Mongocfgconfig60_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage.WiredTiger" as const, + + encode( + message: Mongocfgconfig60_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongocfgconfig60_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60_Storage_WiredTiger, + } as Mongocfgconfig60_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongocfgconfig60_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig60_Storage_WiredTiger, + } as Mongocfgconfig60_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig60_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? 
Mongocfgconfig60_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig60_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig60_Storage_WiredTiger, + } as Mongocfgconfig60_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig60_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60_Storage_WiredTiger.$type, + Mongocfgconfig60_Storage_WiredTiger +); + +const baseMongocfgconfig60_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage.WiredTiger.EngineConfig", +}; + +export const Mongocfgconfig60_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongocfgconfig60_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig60_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig60_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig60_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60_Storage_WiredTiger_EngineConfig): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig60_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig60_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig60_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60_Storage_WiredTiger_EngineConfig.$type, + Mongocfgconfig60_Storage_WiredTiger_EngineConfig +); + +const baseMongocfgconfig60_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.OperationProfiling", + mode: 0, +}; + +export const Mongocfgconfig60_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.OperationProfiling" as const, + + encode( + message: Mongocfgconfig60_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60_OperationProfiling, + } as Mongocfgconfig60_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60_OperationProfiling { + const message = { + ...baseMongocfgconfig60_OperationProfiling, + } as Mongocfgconfig60_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? mongocfgconfig60_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongocfgconfig60_OperationProfiling_ModeToJSON(message.mode)); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig60_OperationProfiling { + const message = { + ...baseMongocfgconfig60_OperationProfiling, + } as Mongocfgconfig60_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60_OperationProfiling.$type, + Mongocfgconfig60_OperationProfiling +); + +const baseMongocfgconfig60_Network: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Network", +}; + +export const Mongocfgconfig60_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0.Network" as const, + + encode( + message: Mongocfgconfig60_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60_Network, + } as Mongocfgconfig60_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60_Network { + const message = { + ...baseMongocfgconfig60_Network, + } as Mongocfgconfig60_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig60_Network { + const message = { + ...baseMongocfgconfig60_Network, + } as Mongocfgconfig60_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60_Network.$type, + Mongocfgconfig60_Network +); + +const baseMongosconfig60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0", +}; + +export const Mongosconfig60 = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0" as const, + + encode( + message: Mongosconfig60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.net !== undefined) { + Mongosconfig60_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongosconfig60 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongosconfig60 } as Mongosconfig60; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.net = Mongosconfig60_Network.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig60 { + const message = { ...baseMongosconfig60 } as Mongosconfig60; + message.net = + object.net !== undefined && object.net !== null + ? 
Mongosconfig60_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig60): unknown { + const obj: any = {}; + message.net !== undefined && + (obj.net = message.net + ? Mongosconfig60_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig60 { + const message = { ...baseMongosconfig60 } as Mongosconfig60; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig60_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongosconfig60.$type, Mongosconfig60); + +const baseMongosconfig60_Network: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0.Network", +}; + +export const Mongosconfig60_Network = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0.Network" as const, + + encode( + message: Mongosconfig60_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig60_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongosconfig60_Network } as Mongosconfig60_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig60_Network { + const message = { ...baseMongosconfig60_Network } as Mongosconfig60_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig60_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig60_Network { + const message = { ...baseMongosconfig60_Network } as Mongosconfig60_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongosconfig60_Network.$type, Mongosconfig60_Network); + +const baseMongodconfigset60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet6_0", +}; + +export const Mongodconfigset60 = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet6_0" as const, + + encode( + message: Mongodconfigset60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongodconfig60.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongodconfig60.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongodconfig60.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodconfigset60 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodconfigset60 } as Mongodconfigset60; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongodconfig60.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongodconfig60.decode(reader, reader.uint32()); + break; + case 3: + message.defaultConfig = Mongodconfig60.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfigset60 { + const message = { ...baseMongodconfigset60 } as Mongodconfigset60; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig60.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig60.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig60.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongodconfigset60): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongodconfig60.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongodconfig60.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongodconfig60.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfigset60 { + const message = { ...baseMongodconfigset60 } as Mongodconfigset60; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig60.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig60.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
Mongodconfig60.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodconfigset60.$type, Mongodconfigset60); + +const baseMongocfgconfigset60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet6_0", +}; + +export const Mongocfgconfigset60 = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet6_0" as const, + + encode( + message: Mongocfgconfigset60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongocfgconfig60.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongocfgconfig60.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongocfgconfig60.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongocfgconfigset60 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongocfgconfigset60 } as Mongocfgconfigset60; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongocfgconfig60.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongocfgconfig60.decode(reader, reader.uint32()); + break; + case 3: + message.defaultConfig = Mongocfgconfig60.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfigset60 { + const message = { ...baseMongocfgconfigset60 } as Mongocfgconfigset60; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig60.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig60.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig60.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfigset60): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongocfgconfig60.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongocfgconfig60.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongocfgconfig60.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfigset60 { + const message = { ...baseMongocfgconfigset60 } as Mongocfgconfigset60; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig60.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig60.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
Mongocfgconfig60.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongocfgconfigset60.$type, Mongocfgconfigset60); + +const baseMongosconfigset60: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet6_0", +}; + +export const Mongosconfigset60 = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet6_0" as const, + + encode( + message: Mongosconfigset60, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongosconfig60.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongosconfig60.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongosconfig60.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongosconfigset60 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongosconfigset60 } as Mongosconfigset60; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongosconfig60.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongosconfig60.decode(reader, reader.uint32()); + break; + case 3: + message.defaultConfig = Mongosconfig60.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfigset60 { + const message = { ...baseMongosconfigset60 } as Mongosconfigset60; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig60.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig60.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig60.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongosconfigset60): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongosconfig60.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongosconfig60.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongosconfig60.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfigset60 { + const message = { ...baseMongosconfigset60 } as Mongosconfigset60; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig60.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig60.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
Mongosconfig60.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongosconfigset60.$type, Mongosconfigset60); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0_enterprise.ts new file mode 100644 index 00000000..9185800a --- /dev/null +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb6_0_enterprise.ts @@ -0,0 +1,2934 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.mongodb.v1.config"; + +/** + * Configuration of a mongod daemon. Supported options are a limited subset of all + * options described in [MongoDB documentation](https://docs.mongodb.com/v6.0/reference/configuration-options/). + */ +export interface Mongodconfig60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise"; + /** `storage` section of mongod configuration. */ + storage?: Mongodconfig60Enterprise_Storage; + /** `operationProfiling` section of mongod configuration. */ + operationProfiling?: Mongodconfig60Enterprise_OperationProfiling; + /** `net` section of mongod configuration. */ + net?: Mongodconfig60Enterprise_Network; + /** `security` section of mongod configuration. */ + security?: Mongodconfig60Enterprise_Security; + /** `AuditLog` section of mongod configuration. */ + auditLog?: Mongodconfig60Enterprise_AuditLog; + /** `SetParameter` section of mongod configuration. */ + setParameter?: Mongodconfig60Enterprise_SetParameter; +} + +export interface Mongodconfig60Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongodconfig60Enterprise_Storage_WiredTiger; + /** Configuration of the MongoDB [journal](https://docs.mongodb.com/v6.0/reference/glossary/#term-journal). */ + journal?: Mongodconfig60Enterprise_Storage_Journal; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongodconfig60Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig; + /** Collection configuration for WiredTiger. */ + collectionConfig?: Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig; +} + +export interface Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. 
*/ + cacheSizeGb?: number; +} + +export interface Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger.CollectionConfig"; + /** Default type of compression to use for collection data. */ + blockCompressor: Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor; +} + +export enum Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + COMPRESSOR_UNSPECIFIED = 0, + /** NONE - No compression. */ + NONE = 1, + /** SNAPPY - The [Snappy](https://docs.mongodb.com/v6.0/reference/glossary/#term-snappy) compression. */ + SNAPPY = 2, + /** ZLIB - The [zlib](https://docs.mongodb.com/v6.0/reference/glossary/#term-zlib) compression. */ + ZLIB = 3, + /** ZSTD - The [zstd](https://docs.mongodb.com/v6.0/reference/glossary/#term-zstd) compression. */ + ZSTD = 4, + UNRECOGNIZED = -1, +} + +export function mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object: any +): Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + switch (object) { + case 0: + case "COMPRESSOR_UNSPECIFIED": + return Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED; + case 1: + case "NONE": + return Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE; + case 2: + case "SNAPPY": + return Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY; + case 3: + case "ZLIB": + return Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case 4: + case "ZSTD": + return Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.UNRECOGNIZED; + } +} + +export function mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + object: Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor +): string { + switch (object) { + case Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED: + return "COMPRESSOR_UNSPECIFIED"; + case Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE: + return "NONE"; + case Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY: + return "SNAPPY"; + case Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: + return "ZLIB"; + case Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZSTD: + return "ZSTD"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig60Enterprise_Storage_Journal { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.Journal"; + /** + * Commit interval between journal operations, in milliseconds. + * Default: 100. + */ + commitInterval?: number; +} + +export interface Mongodconfig60Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongodconfig60Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. 
+ */ + slowOpThreshold?: number; +} + +export enum Mongodconfig60Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig60Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongodconfig60Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongodconfig60Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongodconfig60Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongodconfig60Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongodconfig60Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig60Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongodconfig60Enterprise_OperationProfiling_ModeToJSON( + object: Mongodconfig60Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongodconfig60Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongodconfig60Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongodconfig60Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongodconfig60Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig60Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongod will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfig60Enterprise_Security { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Security"; + /** If encryption at rest should be enabled or not */ + enableEncryption?: boolean; + /** `kmip` section of mongod security config */ + kmip?: Mongodconfig60Enterprise_Security_KMIP; +} + +export interface Mongodconfig60Enterprise_Security_KMIP { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Security.KMIP"; + /** KMIP server name */ + serverName: string; + /** KMIP server port */ + port?: number; + /** KMIP Server CA */ + serverCa: string; + /** KMIP client certificate + private key (unencrypted) */ + clientCertificate: string; + /** KMIP Key identifier (if any) */ + keyIdentifier: string; +} + +export interface Mongodconfig60Enterprise_AuditLog { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.AuditLog"; + /** Audit filter */ + filter: string; + /** Allows runtime configuration of audit filter and auditAuthorizationSuccess */ + runtimeConfiguration?: boolean; +} + +export interface Mongodconfig60Enterprise_SetParameter { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.SetParameter"; + /** Enables the auditing of authorization successes */ + auditAuthorizationSuccess?: boolean; +} + +export interface Mongocfgconfig60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise"; + /** `storage` section of mongocfg configuration. */ + storage?: Mongocfgconfig60Enterprise_Storage; + /** `operationProfiling` section of mongocfg configuration. 
*/ + operationProfiling?: Mongocfgconfig60Enterprise_OperationProfiling; + /** `net` section of mongocfg configuration. */ + net?: Mongocfgconfig60Enterprise_Network; +} + +export interface Mongocfgconfig60Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongocfgconfig60Enterprise_Storage_WiredTiger; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongocfgconfig60Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig; +} + +export interface Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongocfgconfig60Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongocfgconfig60Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. For details see [MongoDB documentation](https://docs.mongodb.com/v6.0/reference/configuration-options/#operationProfiling.slowOpThresholdMs). + */ + slowOpThreshold?: number; +} + +export enum Mongocfgconfig60Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. 
*/ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongocfgconfig60Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongocfgconfig60Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongocfgconfig60Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongocfgconfig60Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongocfgconfig60Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongocfgconfig60Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongocfgconfig60Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongocfgconfig60Enterprise_OperationProfiling_ModeToJSON( + object: Mongocfgconfig60Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongocfgconfig60Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongocfgconfig60Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongocfgconfig60Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongocfgconfig60Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongocfgconfig60Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongocfg will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongosconfig60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0_enterprise"; + /** Network settings for mongos. */ + net?: Mongosconfig60Enterprise_Network; +} + +export interface Mongosconfig60Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongos will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfigset60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet6_0_enterprise"; + /** + * Effective mongod settings for a MongoDB 6.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongodconfig60Enterprise; + /** User-defined mongod settings for a MongoDB 6.0 cluster. */ + userConfig?: Mongodconfig60Enterprise; + /** Default mongod configuration for a MongoDB 6.0 cluster. */ + defaultConfig?: Mongodconfig60Enterprise; +} + +export interface Mongocfgconfigset60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet6_0_enterprise"; + /** + * Effective mongocfg settings for a MongoDB 6.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongocfgconfig60Enterprise; + /** User-defined mongocfg settings for a MongoDB 6.0 cluster. */ + userConfig?: Mongocfgconfig60Enterprise; + /** Default mongocfg configuration for a MongoDB 6.0 cluster. */ + defaultConfig?: Mongocfgconfig60Enterprise; +} + +export interface Mongosconfigset60Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet6_0_enterprise"; + /** + * Effective mongos settings for a MongoDB 6.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongosconfig60Enterprise; + /** User-defined mongos settings for a MongoDB 5.0 cluster. 
*/ + userConfig?: Mongosconfig60Enterprise; + /** Default mongos configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongosconfig60Enterprise; +} + +const baseMongodconfig60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise", +}; + +export const Mongodconfig60Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise" as const, + + encode( + message: Mongodconfig60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongodconfig60Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongodconfig60Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongodconfig60Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.security !== undefined) { + Mongodconfig60Enterprise_Security.encode( + message.security, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.auditLog !== undefined) { + Mongodconfig60Enterprise_AuditLog.encode( + message.auditLog, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.setParameter !== undefined) { + Mongodconfig60Enterprise_SetParameter.encode( + message.setParameter, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise, + } as Mongodconfig60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongodconfig60Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongodconfig60Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongodconfig60Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.security = Mongodconfig60Enterprise_Security.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.auditLog = Mongodconfig60Enterprise_AuditLog.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.setParameter = Mongodconfig60Enterprise_SetParameter.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise { + const message = { + ...baseMongodconfig60Enterprise, + } as Mongodconfig60Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig60Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig60Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig60Enterprise_Network.fromJSON(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? 
Mongodconfig60Enterprise_Security.fromJSON(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig60Enterprise_AuditLog.fromJSON(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig60Enterprise_SetParameter.fromJSON(object.setParameter) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongodconfig60Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongodconfig60Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongodconfig60Enterprise_Network.toJSON(message.net) + : undefined); + message.security !== undefined && + (obj.security = message.security + ? Mongodconfig60Enterprise_Security.toJSON(message.security) + : undefined); + message.auditLog !== undefined && + (obj.auditLog = message.auditLog + ? Mongodconfig60Enterprise_AuditLog.toJSON(message.auditLog) + : undefined); + message.setParameter !== undefined && + (obj.setParameter = message.setParameter + ? Mongodconfig60Enterprise_SetParameter.toJSON(message.setParameter) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig60Enterprise { + const message = { + ...baseMongodconfig60Enterprise, + } as Mongodconfig60Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig60Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig60Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig60Enterprise_Network.fromPartial(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig60Enterprise_Security.fromPartial(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig60Enterprise_AuditLog.fromPartial(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? 
Mongodconfig60Enterprise_SetParameter.fromPartial(object.setParameter) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise.$type, + Mongodconfig60Enterprise +); + +const baseMongodconfig60Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage", +}; + +export const Mongodconfig60Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage" as const, + + encode( + message: Mongodconfig60Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongodconfig60Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.journal !== undefined) { + Mongodconfig60Enterprise_Storage_Journal.encode( + message.journal, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Storage, + } as Mongodconfig60Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongodconfig60Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.journal = Mongodconfig60Enterprise_Storage_Journal.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_Storage { + const message = { + ...baseMongodconfig60Enterprise_Storage, + } as Mongodconfig60Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig60Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig60Enterprise_Storage_Journal.fromJSON(object.journal) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongodconfig60Enterprise_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + message.journal !== undefined && + (obj.journal = message.journal + ? Mongodconfig60Enterprise_Storage_Journal.toJSON(message.journal) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_Storage { + const message = { + ...baseMongodconfig60Enterprise_Storage, + } as Mongodconfig60Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig60Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? 
Mongodconfig60Enterprise_Storage_Journal.fromPartial(object.journal) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Storage.$type, + Mongodconfig60Enterprise_Storage +); + +const baseMongodconfig60Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger", +}; + +export const Mongodconfig60Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongodconfig60Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.collectionConfig !== undefined) { + Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig.encode( + message.collectionConfig, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger, + } as Mongodconfig60Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.collectionConfig = + Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger, + } as Mongodconfig60Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig.fromJSON( + object.collectionConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + message.collectionConfig !== undefined && + (obj.collectionConfig = message.collectionConfig + ? Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig.toJSON( + message.collectionConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger, + } as Mongodconfig60Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? 
Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig.fromPartial( + object.collectionConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Storage_WiredTiger.$type, + Mongodconfig60Enterprise_Storage_WiredTiger +); + +const baseMongodconfig60Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongodconfig60Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig: object = + { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger.CollectionConfig", + blockCompressor: 0, + }; + +export const Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.WiredTiger.CollectionConfig" as const, + + encode( + message: Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.blockCompressor !== 0) { + writer.uint32(8).int32(message.blockCompressor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blockCompressor = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = + object.blockCompressor !== undefined && object.blockCompressor !== null + ? mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object.blockCompressor + ) + : 0; + return message; + }, + + toJSON( + message: Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig + ): unknown { + const obj: any = {}; + message.blockCompressor !== undefined && + (obj.blockCompressor = + mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + message.blockCompressor + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = object.blockCompressor ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig.$type, + Mongodconfig60Enterprise_Storage_WiredTiger_CollectionConfig +); + +const baseMongodconfig60Enterprise_Storage_Journal: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.Journal", +}; + +export const Mongodconfig60Enterprise_Storage_Journal = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Storage.Journal" as const, + + encode( + message: Mongodconfig60Enterprise_Storage_Journal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.commitInterval !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.commitInterval! 
}, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Storage_Journal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Storage_Journal, + } as Mongodconfig60Enterprise_Storage_Journal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.commitInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig60Enterprise_Storage_Journal, + } as Mongodconfig60Enterprise_Storage_Journal; + message.commitInterval = + object.commitInterval !== undefined && object.commitInterval !== null + ? Number(object.commitInterval) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_Storage_Journal): unknown { + const obj: any = {}; + message.commitInterval !== undefined && + (obj.commitInterval = message.commitInterval); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig60Enterprise_Storage_Journal, + } as Mongodconfig60Enterprise_Storage_Journal; + message.commitInterval = object.commitInterval ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Storage_Journal.$type, + Mongodconfig60Enterprise_Storage_Journal +); + +const baseMongodconfig60Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongodconfig60Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.OperationProfiling" as const, + + encode( + message: Mongodconfig60Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_OperationProfiling, + } as Mongodconfig60Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig60Enterprise_OperationProfiling, + } as Mongodconfig60Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? 
mongodconfig60Enterprise_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongodconfig60Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig60Enterprise_OperationProfiling, + } as Mongodconfig60Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_OperationProfiling.$type, + Mongodconfig60Enterprise_OperationProfiling +); + +const baseMongodconfig60Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Network", +}; + +export const Mongodconfig60Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Network" as const, + + encode( + message: Mongodconfig60Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Network, + } as Mongodconfig60Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_Network { + const message = { + ...baseMongodconfig60Enterprise_Network, + } as Mongodconfig60Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_Network { + const message = { + ...baseMongodconfig60Enterprise_Network, + } as Mongodconfig60Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Network.$type, + Mongodconfig60Enterprise_Network +); + +const baseMongodconfig60Enterprise_Security: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Security", +}; + +export const Mongodconfig60Enterprise_Security = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Security" as const, + + encode( + message: Mongodconfig60Enterprise_Security, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enableEncryption !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableEncryption!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.kmip !== undefined) { + Mongodconfig60Enterprise_Security_KMIP.encode( + message.kmip, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Security { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Security, + } as Mongodconfig60Enterprise_Security; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enableEncryption = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.kmip = Mongodconfig60Enterprise_Security_KMIP.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_Security { + const message = { + ...baseMongodconfig60Enterprise_Security, + } as Mongodconfig60Enterprise_Security; + message.enableEncryption = + object.enableEncryption !== undefined && object.enableEncryption !== null + ? Boolean(object.enableEncryption) + : undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig60Enterprise_Security_KMIP.fromJSON(object.kmip) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_Security): unknown { + const obj: any = {}; + message.enableEncryption !== undefined && + (obj.enableEncryption = message.enableEncryption); + message.kmip !== undefined && + (obj.kmip = message.kmip + ? Mongodconfig60Enterprise_Security_KMIP.toJSON(message.kmip) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_Security { + const message = { + ...baseMongodconfig60Enterprise_Security, + } as Mongodconfig60Enterprise_Security; + message.enableEncryption = object.enableEncryption ?? undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? 
Mongodconfig60Enterprise_Security_KMIP.fromPartial(object.kmip) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Security.$type, + Mongodconfig60Enterprise_Security +); + +const baseMongodconfig60Enterprise_Security_KMIP: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Security.KMIP", + serverName: "", + serverCa: "", + clientCertificate: "", + keyIdentifier: "", +}; + +export const Mongodconfig60Enterprise_Security_KMIP = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.Security.KMIP" as const, + + encode( + message: Mongodconfig60Enterprise_Security_KMIP, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serverName !== "") { + writer.uint32(10).string(message.serverName); + } + if (message.port !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.port! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.serverCa !== "") { + writer.uint32(26).string(message.serverCa); + } + if (message.clientCertificate !== "") { + writer.uint32(34).string(message.clientCertificate); + } + if (message.keyIdentifier !== "") { + writer.uint32(42).string(message.keyIdentifier); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_Security_KMIP { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_Security_KMIP, + } as Mongodconfig60Enterprise_Security_KMIP; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serverName = reader.string(); + break; + case 2: + message.port = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.serverCa = reader.string(); + break; + case 4: + message.clientCertificate = reader.string(); + break; + case 5: + message.keyIdentifier = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig60Enterprise_Security_KMIP, + } as Mongodconfig60Enterprise_Security_KMIP; + message.serverName = + object.serverName !== undefined && object.serverName !== null + ? String(object.serverName) + : ""; + message.port = + object.port !== undefined && object.port !== null + ? Number(object.port) + : undefined; + message.serverCa = + object.serverCa !== undefined && object.serverCa !== null + ? String(object.serverCa) + : ""; + message.clientCertificate = + object.clientCertificate !== undefined && + object.clientCertificate !== null + ? String(object.clientCertificate) + : ""; + message.keyIdentifier = + object.keyIdentifier !== undefined && object.keyIdentifier !== null + ? 
String(object.keyIdentifier) + : ""; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_Security_KMIP): unknown { + const obj: any = {}; + message.serverName !== undefined && (obj.serverName = message.serverName); + message.port !== undefined && (obj.port = message.port); + message.serverCa !== undefined && (obj.serverCa = message.serverCa); + message.clientCertificate !== undefined && + (obj.clientCertificate = message.clientCertificate); + message.keyIdentifier !== undefined && + (obj.keyIdentifier = message.keyIdentifier); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig60Enterprise_Security_KMIP, + } as Mongodconfig60Enterprise_Security_KMIP; + message.serverName = object.serverName ?? ""; + message.port = object.port ?? undefined; + message.serverCa = object.serverCa ?? ""; + message.clientCertificate = object.clientCertificate ?? ""; + message.keyIdentifier = object.keyIdentifier ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_Security_KMIP.$type, + Mongodconfig60Enterprise_Security_KMIP +); + +const baseMongodconfig60Enterprise_AuditLog: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.AuditLog", + filter: "", +}; + +export const Mongodconfig60Enterprise_AuditLog = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.AuditLog" as const, + + encode( + message: Mongodconfig60Enterprise_AuditLog, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.filter !== "") { + writer.uint32(10).string(message.filter); + } + if (message.runtimeConfiguration !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.runtimeConfiguration!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_AuditLog { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_AuditLog, + } as Mongodconfig60Enterprise_AuditLog; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filter = reader.string(); + break; + case 2: + message.runtimeConfiguration = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_AuditLog { + const message = { + ...baseMongodconfig60Enterprise_AuditLog, + } as Mongodconfig60Enterprise_AuditLog; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.runtimeConfiguration = + object.runtimeConfiguration !== undefined && + object.runtimeConfiguration !== null + ? 
Boolean(object.runtimeConfiguration) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_AuditLog): unknown { + const obj: any = {}; + message.filter !== undefined && (obj.filter = message.filter); + message.runtimeConfiguration !== undefined && + (obj.runtimeConfiguration = message.runtimeConfiguration); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_AuditLog { + const message = { + ...baseMongodconfig60Enterprise_AuditLog, + } as Mongodconfig60Enterprise_AuditLog; + message.filter = object.filter ?? ""; + message.runtimeConfiguration = object.runtimeConfiguration ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_AuditLog.$type, + Mongodconfig60Enterprise_AuditLog +); + +const baseMongodconfig60Enterprise_SetParameter: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.SetParameter", +}; + +export const Mongodconfig60Enterprise_SetParameter = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig6_0_enterprise.SetParameter" as const, + + encode( + message: Mongodconfig60Enterprise_SetParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.auditAuthorizationSuccess !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.auditAuthorizationSuccess!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig60Enterprise_SetParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig60Enterprise_SetParameter, + } as Mongodconfig60Enterprise_SetParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.auditAuthorizationSuccess = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig60Enterprise_SetParameter { + const message = { + ...baseMongodconfig60Enterprise_SetParameter, + } as Mongodconfig60Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess !== undefined && + object.auditAuthorizationSuccess !== null + ? Boolean(object.auditAuthorizationSuccess) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig60Enterprise_SetParameter): unknown { + const obj: any = {}; + message.auditAuthorizationSuccess !== undefined && + (obj.auditAuthorizationSuccess = message.auditAuthorizationSuccess); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig60Enterprise_SetParameter { + const message = { + ...baseMongodconfig60Enterprise_SetParameter, + } as Mongodconfig60Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig60Enterprise_SetParameter.$type, + Mongodconfig60Enterprise_SetParameter +); + +const baseMongocfgconfig60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise", +}; + +export const Mongocfgconfig60Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise" as const, + + encode( + message: Mongocfgconfig60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongocfgconfig60Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongocfgconfig60Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongocfgconfig60Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60Enterprise, + } as Mongocfgconfig60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongocfgconfig60Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongocfgconfig60Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongocfgconfig60Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60Enterprise { + const message = { + ...baseMongocfgconfig60Enterprise, + } as Mongocfgconfig60Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig60Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig60Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig60Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongocfgconfig60Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongocfgconfig60Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongocfgconfig60Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig60Enterprise { + const message = { + ...baseMongocfgconfig60Enterprise, + } as Mongocfgconfig60Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? 
Mongocfgconfig60Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig60Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig60Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60Enterprise.$type, + Mongocfgconfig60Enterprise +); + +const baseMongocfgconfig60Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage", +}; + +export const Mongocfgconfig60Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage" as const, + + encode( + message: Mongocfgconfig60Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongocfgconfig60Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60Enterprise_Storage, + } as Mongocfgconfig60Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongocfgconfig60Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60Enterprise_Storage { + const message = { + ...baseMongocfgconfig60Enterprise_Storage, + } as Mongocfgconfig60Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig60Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongocfgconfig60Enterprise_Storage_WiredTiger.toJSON( + message.wiredTiger + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig60Enterprise_Storage { + const message = { + ...baseMongocfgconfig60Enterprise_Storage, + } as Mongocfgconfig60Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? 
Mongocfgconfig60Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60Enterprise_Storage.$type, + Mongocfgconfig60Enterprise_Storage +); + +const baseMongocfgconfig60Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage.WiredTiger", +}; + +export const Mongocfgconfig60Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongocfgconfig60Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60Enterprise_Storage_WiredTiger, + } as Mongocfgconfig60Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig60Enterprise_Storage_WiredTiger, + } as Mongocfgconfig60Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig60Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig60Enterprise_Storage_WiredTiger, + } as Mongocfgconfig60Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? 
Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60Enterprise_Storage_WiredTiger.$type, + Mongocfgconfig60Enterprise_Storage_WiredTiger +); + +const baseMongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongocfgconfig60Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongocfgconfig60Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongocfgconfig60Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.OperationProfiling" as const, + + encode( + message: Mongocfgconfig60Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60Enterprise_OperationProfiling, + } as Mongocfgconfig60Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig60Enterprise_OperationProfiling, + } as Mongocfgconfig60Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? mongocfgconfig60Enterprise_OperationProfiling_ModeFromJSON( + object.mode + ) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongocfgconfig60Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig60Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig60Enterprise_OperationProfiling, + } as Mongocfgconfig60Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60Enterprise_OperationProfiling.$type, + Mongocfgconfig60Enterprise_OperationProfiling +); + +const baseMongocfgconfig60Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Network", +}; + +export const Mongocfgconfig60Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig6_0_enterprise.Network" as const, + + encode( + message: Mongocfgconfig60Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig60Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig60Enterprise_Network, + } as Mongocfgconfig60Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig60Enterprise_Network { + const message = { + ...baseMongocfgconfig60Enterprise_Network, + } as Mongocfgconfig60Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig60Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig60Enterprise_Network { + const message = { + ...baseMongocfgconfig60Enterprise_Network, + } as Mongocfgconfig60Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig60Enterprise_Network.$type, + Mongocfgconfig60Enterprise_Network +); + +const baseMongosconfig60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0_enterprise", +}; + +export const Mongosconfig60Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0_enterprise" as const, + + encode( + message: Mongosconfig60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.net !== undefined) { + Mongosconfig60Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfig60Enterprise, + } as Mongosconfig60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.net = Mongosconfig60Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig60Enterprise { + const message = { + ...baseMongosconfig60Enterprise, + } as Mongosconfig60Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig60Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig60Enterprise): unknown { + const obj: any = {}; + message.net !== undefined && + (obj.net = message.net + ? Mongosconfig60Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig60Enterprise { + const message = { + ...baseMongosconfig60Enterprise, + } as Mongosconfig60Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig60Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig60Enterprise.$type, + Mongosconfig60Enterprise +); + +const baseMongosconfig60Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0_enterprise.Network", +}; + +export const Mongosconfig60Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig6_0_enterprise.Network" as const, + + encode( + message: Mongosconfig60Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig60Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfig60Enterprise_Network, + } as Mongosconfig60Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig60Enterprise_Network { + const message = { + ...baseMongosconfig60Enterprise_Network, + } as Mongosconfig60Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig60Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongosconfig60Enterprise_Network { + const message = { + ...baseMongosconfig60Enterprise_Network, + } as Mongosconfig60Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig60Enterprise_Network.$type, + Mongosconfig60Enterprise_Network +); + +const baseMongodconfigset60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet6_0_enterprise", +}; + +export const Mongodconfigset60Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet6_0_enterprise" as const, + + encode( + message: Mongodconfigset60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongodconfig60Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongodconfig60Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongodconfig60Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfigset60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfigset60Enterprise, + } as Mongodconfigset60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongodconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongodconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongodconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfigset60Enterprise { + const message = { + ...baseMongodconfigset60Enterprise, + } as Mongodconfigset60Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig60Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig60Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig60Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongodconfigset60Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongodconfig60Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongodconfig60Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongodconfig60Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfigset60Enterprise { + const message = { + ...baseMongodconfigset60Enterprise, + } as Mongodconfigset60Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig60Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
Mongodconfig60Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig60Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfigset60Enterprise.$type, + Mongodconfigset60Enterprise +); + +const baseMongocfgconfigset60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet6_0_enterprise", +}; + +export const Mongocfgconfigset60Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet6_0_enterprise" as const, + + encode( + message: Mongocfgconfigset60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongocfgconfig60Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongocfgconfig60Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongocfgconfig60Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfigset60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfigset60Enterprise, + } as Mongocfgconfigset60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongocfgconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongocfgconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongocfgconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfigset60Enterprise { + const message = { + ...baseMongocfgconfigset60Enterprise, + } as Mongocfgconfigset60Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig60Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig60Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig60Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfigset60Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongocfgconfig60Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongocfgconfig60Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongocfgconfig60Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfigset60Enterprise { + const message = { + ...baseMongocfgconfigset60Enterprise, + } as Mongocfgconfigset60Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig60Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig60Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig60Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfigset60Enterprise.$type, + Mongocfgconfigset60Enterprise +); + +const baseMongosconfigset60Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet6_0_enterprise", +}; + +export const Mongosconfigset60Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet6_0_enterprise" as const, + + encode( + message: Mongosconfigset60Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongosconfig60Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongosconfig60Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongosconfig60Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfigset60Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfigset60Enterprise, + } as Mongosconfigset60Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongosconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongosconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongosconfig60Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfigset60Enterprise { + const message = { + ...baseMongosconfigset60Enterprise, + } as Mongosconfigset60Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig60Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig60Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig60Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongosconfigset60Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? 
Mongosconfig60Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongosconfig60Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Mongosconfig60Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfigset60Enterprise { + const message = { + ...baseMongosconfigset60Enterprise, + } as Mongosconfigset60Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig60Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig60Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig60Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfigset60Enterprise.$type, + Mongosconfigset60Enterprise +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/database_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/database_service.ts index 3ea8e81b..9d858451 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/database_service.ts @@ -51,7 +51,7 @@ export interface ListDatabasesRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListDatabasesResponse.next_page_token] returned by a previous list request. + * [ListDatabasesResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/resource_preset_service.ts index 659465be..db78da5a 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/resource_preset_service.ts @@ -33,11 +33,12 @@ export interface ListResourcePresetsRequest { * The maximum number of results per page to return. If the number of available * results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListResourcePresetsResponse.next_page_token] returned by a previous list request. + * [ListResourcePresetsResponse.next_page_token] returned by the previous list request. 
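// Illustrative usage sketch: the [page_token]/[next_page_token] contract described in the
// comments above is the same for every List* RPC in these services. The `listPage` callback
// and the `items` field are hypothetical placeholders standing in for any concrete pair such
// as ListResourcePresetsRequest/ListResourcePresetsResponse; only `pageSize`, `pageToken`
// and `nextPageToken` are taken from the generated messages.
interface PageRequest {
  pageSize: number;
  pageToken: string;
}

interface PageResponse<T> {
  items: T[];
  nextPageToken: string;
}

async function listAll<T>(
  listPage: (req: PageRequest) => Promise<PageResponse<T>>,
  pageSize = 100 // default per the comment above; acceptable values are 0..1000
): Promise<T[]> {
  const results: T[] = [];
  let pageToken = "";
  do {
    const page = await listPage({ pageSize, pageToken });
    results.push(...page.items);
    pageToken = page.nextPageToken; // an empty token means this was the last page
  } while (pageToken !== "");
  return results;
}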
*/ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/user_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/user_service.ts index 1088bc58..d410d7ff 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/user_service.ts @@ -45,10 +45,15 @@ export interface ListUsersRequest { * To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListUsersResponse.next_page_token] returned by a previous list request. + * [ListUsersResponse.next_page_token] returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts index 64625e53..faba6f10 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts @@ -10,6 +10,7 @@ import { TimeOfDay } from "../../../../../google/type/timeofday"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Mysqlconfigset57 } from "../../../../../yandex/cloud/mdb/mysql/v1/config/mysql5_7"; import { Mysqlconfigset80 } from "../../../../../yandex/cloud/mdb/mysql/v1/config/mysql8_0"; +import { Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; @@ -23,7 +24,7 @@ export interface Cluster { /** * ID of the cluster. * - * This ID is assigned by Yandex Cloud at the time of creation. + * This ID is assigned by the platform at the time of creation. */ id: string; /** ID of the folder that the cluster belongs to. */ @@ -264,6 +265,8 @@ export interface ClusterConfig { access?: Access; /** Configuration of the performance diagnostics service. */ performanceDiagnostics?: PerformanceDiagnostics; + /** Retention policy of automated backups. */ + backupRetainPeriodDays?: number; } export interface Host { @@ -271,8 +274,8 @@ export interface Host { /** * Name of the host. * - * This name is assigned by Yandex Cloud at the time of creation. - * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. + * This name is assigned by the platform at the time of creation. + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. */ name: string; /** ID of the cluster the host belongs to. */ @@ -504,7 +507,7 @@ export interface Access { */ dataLens: boolean; /** - * Allows SQL queries to the cluster databases from Yandex Cloud management console. + * Allows SQL queries to the cluster databases from management console. * * See [the documentation](/docs/managed-mysql/operations/web-sql-query) for details. 
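// Illustrative sketch for the new `backupRetainPeriodDays` field added to the MySQL
// ClusterConfig in this hunk. The import path mirrors the generated file touched by the
// diff; how the type is re-exported from the package entry point is an assumption.
import { ClusterConfig } from "./src/generated/yandex/cloud/mdb/mysql/v1/cluster";

// The field is wrapped as google.protobuf.Int64Value on the wire, so it stays `undefined`
// until explicitly set. Here automated backups are retained for 14 days.
const clusterConfig = ClusterConfig.fromPartial({ backupRetainPeriodDays: 14 });
console.log(ClusterConfig.toJSON(clusterConfig));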
*/ @@ -1074,6 +1077,15 @@ export const ClusterConfig = { writer.uint32(58).fork() ).ldelim(); } + if (message.backupRetainPeriodDays !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backupRetainPeriodDays!, + }, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -1114,6 +1126,12 @@ export const ClusterConfig = { reader.uint32() ); break; + case 8: + message.backupRetainPeriodDays = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -1154,6 +1172,11 @@ export const ClusterConfig = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) : undefined; + message.backupRetainPeriodDays = + object.backupRetainPeriodDays !== undefined && + object.backupRetainPeriodDays !== null + ? Number(object.backupRetainPeriodDays) + : undefined; return message; }, @@ -1182,6 +1205,8 @@ export const ClusterConfig = { (obj.performanceDiagnostics = message.performanceDiagnostics ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) : undefined); + message.backupRetainPeriodDays !== undefined && + (obj.backupRetainPeriodDays = message.backupRetainPeriodDays); return obj; }, @@ -1216,6 +1241,7 @@ export const ClusterConfig = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) : undefined; + message.backupRetainPeriodDays = object.backupRetainPeriodDays ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts index 5fd8e0db..02c9fdb6 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts @@ -36,6 +36,7 @@ import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { Backup } from "../../../../../yandex/cloud/mdb/mysql/v1/backup"; import { Mysqlconfig57 } from "../../../../../yandex/cloud/mdb/mysql/v1/config/mysql5_7"; import { Mysqlconfig80 } from "../../../../../yandex/cloud/mdb/mysql/v1/config/mysql8_0"; +import { Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; @@ -918,6 +919,8 @@ export interface ConfigSpec { access?: Access; /** Configuration of the performance diagnostics service. */ performanceDiagnostics?: PerformanceDiagnostics; + /** Retention policy of automated backups. */ + backupRetainPeriodDays?: number; } const baseGetClusterRequest: object = { @@ -5569,6 +5572,15 @@ export const ConfigSpec = { writer.uint32(58).fork() ).ldelim(); } + if (message.backupRetainPeriodDays !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backupRetainPeriodDays!, + }, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -5603,6 +5615,12 @@ export const ConfigSpec = { reader.uint32() ); break; + case 8: + message.backupRetainPeriodDays = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -5643,6 +5661,11 @@ export const ConfigSpec = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) : undefined; + message.backupRetainPeriodDays = + object.backupRetainPeriodDays !== undefined && + object.backupRetainPeriodDays !== null + ? 
Number(object.backupRetainPeriodDays) + : undefined; return message; }, @@ -5671,6 +5694,8 @@ export const ConfigSpec = { (obj.performanceDiagnostics = message.performanceDiagnostics ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) : undefined); + message.backupRetainPeriodDays !== undefined && + (obj.backupRetainPeriodDays = message.backupRetainPeriodDays); return obj; }, @@ -5705,6 +5730,7 @@ export const ConfigSpec = { object.performanceDiagnostics !== null ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) : undefined; + message.backupRetainPeriodDays = object.backupRetainPeriodDays ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts index 2d7c6091..7fff574c 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts @@ -417,6 +417,18 @@ export interface Mysqlconfig57 { * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_sp_recursion_depth). */ maxSpRecursionDepth?: number; + /** + * The level of zlib compression to use for InnoDB compressed tables and indexes. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_compression_level). + */ + innodbCompressionLevel?: number; + /** + * Specifies how the source mysqld generates the dependency information that it writes in the binary log to help replicas determine which transactions can be executed in parallel. + * + * For details, see [MySQL documentation for the variabl](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_transaction_dependency_tracking). 
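// Illustrative sketch for the two MySQL 5.7 settings introduced in this hunk. Message and
// enum names come from this generated module; the import path mirrors the file in the diff
// and may differ from the package's public exports.
import {
  Mysqlconfig57,
  Mysqlconfig57_BinlogTransactionDependencyTracking,
} from "./src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7";

const mysqlConfig = Mysqlconfig57.fromPartial({
  // zlib compression level for InnoDB compressed tables and indexes.
  innodbCompressionLevel: 6,
  // Derive parallel-replication dependencies from write sets rather than commit order.
  binlogTransactionDependencyTracking:
    Mysqlconfig57_BinlogTransactionDependencyTracking.WRITESET,
});
console.log(Mysqlconfig57.toJSON(mysqlConfig));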
+ */ + binlogTransactionDependencyTracking: Mysqlconfig57_BinlogTransactionDependencyTracking; } export enum Mysqlconfig57_SQLMode { @@ -932,6 +944,54 @@ export function mysqlconfig57_LogSlowFilterTypeToJSON( } } +export enum Mysqlconfig57_BinlogTransactionDependencyTracking { + BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED = 0, + COMMIT_ORDER = 1, + WRITESET = 2, + WRITESET_SESSION = 3, + UNRECOGNIZED = -1, +} + +export function mysqlconfig57_BinlogTransactionDependencyTrackingFromJSON( + object: any +): Mysqlconfig57_BinlogTransactionDependencyTracking { + switch (object) { + case 0: + case "BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED": + return Mysqlconfig57_BinlogTransactionDependencyTracking.BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED; + case 1: + case "COMMIT_ORDER": + return Mysqlconfig57_BinlogTransactionDependencyTracking.COMMIT_ORDER; + case 2: + case "WRITESET": + return Mysqlconfig57_BinlogTransactionDependencyTracking.WRITESET; + case 3: + case "WRITESET_SESSION": + return Mysqlconfig57_BinlogTransactionDependencyTracking.WRITESET_SESSION; + case -1: + case "UNRECOGNIZED": + default: + return Mysqlconfig57_BinlogTransactionDependencyTracking.UNRECOGNIZED; + } +} + +export function mysqlconfig57_BinlogTransactionDependencyTrackingToJSON( + object: Mysqlconfig57_BinlogTransactionDependencyTracking +): string { + switch (object) { + case Mysqlconfig57_BinlogTransactionDependencyTracking.BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED: + return "BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED"; + case Mysqlconfig57_BinlogTransactionDependencyTracking.COMMIT_ORDER: + return "COMMIT_ORDER"; + case Mysqlconfig57_BinlogTransactionDependencyTracking.WRITESET: + return "WRITESET"; + case Mysqlconfig57_BinlogTransactionDependencyTracking.WRITESET_SESSION: + return "WRITESET_SESSION"; + default: + return "UNKNOWN"; + } +} + export interface Mysqlconfigset57 { $type: "yandex.cloud.mdb.mysql.v1.config.MysqlConfigSet5_7"; /** @@ -957,6 +1017,7 @@ const baseMysqlconfig57: object = { slaveParallelType: 0, logSlowRateType: 0, logSlowFilter: 0, + binlogTransactionDependencyTracking: 0, }; export const Mysqlconfig57 = { @@ -1489,6 +1550,18 @@ export const Mysqlconfig57 = { writer.uint32(554).fork() ).ldelim(); } + if (message.innodbCompressionLevel !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbCompressionLevel!, + }, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.binlogTransactionDependencyTracking !== 0) { + writer.uint32(568).int32(message.binlogTransactionDependencyTracking); + } return writer; }, @@ -1890,6 +1963,15 @@ export const Mysqlconfig57 = { reader.uint32() ).value; break; + case 70: + message.innodbCompressionLevel = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.binlogTransactionDependencyTracking = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -2218,6 +2300,18 @@ export const Mysqlconfig57 = { object.maxSpRecursionDepth !== null ? Number(object.maxSpRecursionDepth) : undefined; + message.innodbCompressionLevel = + object.innodbCompressionLevel !== undefined && + object.innodbCompressionLevel !== null + ? Number(object.innodbCompressionLevel) + : undefined; + message.binlogTransactionDependencyTracking = + object.binlogTransactionDependencyTracking !== undefined && + object.binlogTransactionDependencyTracking !== null + ? 
mysqlconfig57_BinlogTransactionDependencyTrackingFromJSON( + object.binlogTransactionDependencyTracking + ) + : 0; return message; }, @@ -2377,6 +2471,13 @@ export const Mysqlconfig57 = { (obj.showCompatibility_56 = message.showCompatibility56); message.maxSpRecursionDepth !== undefined && (obj.maxSpRecursionDepth = message.maxSpRecursionDepth); + message.innodbCompressionLevel !== undefined && + (obj.innodbCompressionLevel = message.innodbCompressionLevel); + message.binlogTransactionDependencyTracking !== undefined && + (obj.binlogTransactionDependencyTracking = + mysqlconfig57_BinlogTransactionDependencyTrackingToJSON( + message.binlogTransactionDependencyTracking + )); return obj; }, @@ -2470,6 +2571,9 @@ export const Mysqlconfig57 = { message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; message.showCompatibility56 = object.showCompatibility56 ?? undefined; message.maxSpRecursionDepth = object.maxSpRecursionDepth ?? undefined; + message.innodbCompressionLevel = object.innodbCompressionLevel ?? undefined; + message.binlogTransactionDependencyTracking = + object.binlogTransactionDependencyTracking ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts index ad126a8e..86781b6c 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts @@ -417,6 +417,18 @@ export interface Mysqlconfig80 { * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_sp_recursion_depth). */ maxSpRecursionDepth?: number; + /** + * The level of zlib compression to use for InnoDB compressed tables and indexes. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_compression_level). + */ + innodbCompressionLevel?: number; + /** + * Specifies how the source mysqld generates the dependency information that it writes in the binary log to help replicas determine which transactions can be executed in parallel. + * + * For details, see [MySQL documentation for the variabl](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_transaction_dependency_tracking). 
+ */ + binlogTransactionDependencyTracking: Mysqlconfig80_BinlogTransactionDependencyTracking; } export enum Mysqlconfig80_SQLMode { @@ -872,6 +884,54 @@ export function mysqlconfig80_LogSlowFilterTypeToJSON( } } +export enum Mysqlconfig80_BinlogTransactionDependencyTracking { + BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED = 0, + COMMIT_ORDER = 1, + WRITESET = 2, + WRITESET_SESSION = 3, + UNRECOGNIZED = -1, +} + +export function mysqlconfig80_BinlogTransactionDependencyTrackingFromJSON( + object: any +): Mysqlconfig80_BinlogTransactionDependencyTracking { + switch (object) { + case 0: + case "BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED": + return Mysqlconfig80_BinlogTransactionDependencyTracking.BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED; + case 1: + case "COMMIT_ORDER": + return Mysqlconfig80_BinlogTransactionDependencyTracking.COMMIT_ORDER; + case 2: + case "WRITESET": + return Mysqlconfig80_BinlogTransactionDependencyTracking.WRITESET; + case 3: + case "WRITESET_SESSION": + return Mysqlconfig80_BinlogTransactionDependencyTracking.WRITESET_SESSION; + case -1: + case "UNRECOGNIZED": + default: + return Mysqlconfig80_BinlogTransactionDependencyTracking.UNRECOGNIZED; + } +} + +export function mysqlconfig80_BinlogTransactionDependencyTrackingToJSON( + object: Mysqlconfig80_BinlogTransactionDependencyTracking +): string { + switch (object) { + case Mysqlconfig80_BinlogTransactionDependencyTracking.BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED: + return "BINLOG_TRANSACTION_DEPENDENCY_TRACKING_UNSPECIFIED"; + case Mysqlconfig80_BinlogTransactionDependencyTracking.COMMIT_ORDER: + return "COMMIT_ORDER"; + case Mysqlconfig80_BinlogTransactionDependencyTracking.WRITESET: + return "WRITESET"; + case Mysqlconfig80_BinlogTransactionDependencyTracking.WRITESET_SESSION: + return "WRITESET_SESSION"; + default: + return "UNKNOWN"; + } +} + export interface Mysqlconfigset80 { $type: "yandex.cloud.mdb.mysql.v1.config.MysqlConfigSet8_0"; /** @@ -897,6 +957,7 @@ const baseMysqlconfig80: object = { slaveParallelType: 0, logSlowRateType: 0, logSlowFilter: 0, + binlogTransactionDependencyTracking: 0, }; export const Mysqlconfig80 = { @@ -1429,6 +1490,18 @@ export const Mysqlconfig80 = { writer.uint32(554).fork() ).ldelim(); } + if (message.innodbCompressionLevel !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbCompressionLevel!, + }, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.binlogTransactionDependencyTracking !== 0) { + writer.uint32(568).int32(message.binlogTransactionDependencyTracking); + } return writer; }, @@ -1830,6 +1903,15 @@ export const Mysqlconfig80 = { reader.uint32() ).value; break; + case 70: + message.innodbCompressionLevel = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.binlogTransactionDependencyTracking = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -2157,6 +2239,18 @@ export const Mysqlconfig80 = { object.maxSpRecursionDepth !== null ? Number(object.maxSpRecursionDepth) : undefined; + message.innodbCompressionLevel = + object.innodbCompressionLevel !== undefined && + object.innodbCompressionLevel !== null + ? Number(object.innodbCompressionLevel) + : undefined; + message.binlogTransactionDependencyTracking = + object.binlogTransactionDependencyTracking !== undefined && + object.binlogTransactionDependencyTracking !== null + ? 
mysqlconfig80_BinlogTransactionDependencyTrackingFromJSON( + object.binlogTransactionDependencyTracking + ) + : 0; return message; }, @@ -2316,6 +2410,13 @@ export const Mysqlconfig80 = { (obj.lowerCaseTableNames = message.lowerCaseTableNames); message.maxSpRecursionDepth !== undefined && (obj.maxSpRecursionDepth = message.maxSpRecursionDepth); + message.innodbCompressionLevel !== undefined && + (obj.innodbCompressionLevel = message.innodbCompressionLevel); + message.binlogTransactionDependencyTracking !== undefined && + (obj.binlogTransactionDependencyTracking = + mysqlconfig80_BinlogTransactionDependencyTrackingToJSON( + message.binlogTransactionDependencyTracking + )); return obj; }, @@ -2409,6 +2510,9 @@ export const Mysqlconfig80 = { message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; message.maxSpRecursionDepth = object.maxSpRecursionDepth ?? undefined; + message.innodbCompressionLevel = object.innodbCompressionLevel ?? undefined; + message.binlogTransactionDependencyTracking = + object.binlogTransactionDependencyTracking ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/backup_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/backup_service.ts index 5213ddb9..1ef45c56 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/backup_service.ts @@ -42,7 +42,7 @@ export interface ListBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, Set [page_token] to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts index 1e895864..433d6c12 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts @@ -253,7 +253,7 @@ export interface Host { * Name of the MySQL host. The host name is assigned by Managed Service for MySQL * at creation time, and cannot be changed. 1-63 characters long. * - * The name is unique across all existing database hosts in Yandex Cloud, + * The name is unique across all database hosts that exist on the platform, * as it defines the FQDN of the host. */ name: string; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts index a6956fc3..3576fd50 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts @@ -57,7 +57,7 @@ export interface ListClustersRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; /** @@ -274,7 +274,7 @@ export interface ListClusterLogsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** Always return `next_page_token`, even if current page is empty. 
*/ @@ -343,7 +343,7 @@ export interface ListClusterOperationsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -376,7 +376,7 @@ export interface ListClusterBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -409,7 +409,7 @@ export interface ListClusterHostsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/database_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/database_service.ts index ae9b3727..4f37e60b 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/database_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/database_service.ts @@ -51,7 +51,7 @@ export interface ListDatabasesRequest { pageSize: number; /** * Page token. To get the next page of results, Set [page_token] to the [ListDatabasesResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/resource_preset_service.ts index aaa2db56..3a832b37 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/resource_preset_service.ts @@ -37,7 +37,7 @@ export interface ListResourcePresetsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts index b47e33b9..1d5b749c 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts @@ -38,7 +38,7 @@ export interface ListUsersRequest { clusterId: string; /** The maximum number of results per page to return. If the number of available results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by a previous list request. */ + /** Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by the previous list request. 
*/ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts index 157b5832..a1762ccf 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/backup_service.ts @@ -42,7 +42,7 @@ export interface ListBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, Set [page_token] to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts index 2c8401b1..c79362b7 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts @@ -1,5 +1,5 @@ /* eslint-disable */ -import { messageTypeRegistry } from "../../../../../typeRegistry"; +import {MessageType, messageTypeRegistry} from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; import { @@ -16,7 +16,9 @@ import { Postgresqlconfigset111c } from "../../../../../yandex/cloud/mdb/postgre import { PostgresqlConfigSet12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12"; import { Postgresqlconfigset121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c"; import { PostgresqlConfigSet13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13"; +import { Postgresqlconfigset131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c"; import { PostgresqlConfigSet14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; +import { Postgresqlconfigset141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -25,7 +27,9 @@ import { Postgresqlhostconfig111c } from "../../../../../yandex/cloud/mdb/postgr import { PostgresqlHostConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12"; import { Postgresqlhostconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12_1c"; import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13"; +import { Postgresqlhostconfig131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13_1c"; import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; +import { Postgresqlhostconfig141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14_1c"; import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -288,8 +292,12 @@ export interface ClusterConfig { postgresqlConfig121c?: Postgresqlconfigset121c | undefined; /** Configuration of a PostgreSQL 13 server. */ postgresqlConfig13?: PostgresqlConfigSet13 | undefined; + /** Configuration of a PostgreSQL 13 1C server. */ + postgresqlConfig131c?: Postgresqlconfigset131c | undefined; /** Configuration of a PostgreSQL 14 server. 
*/ postgresqlConfig14?: PostgresqlConfigSet14 | undefined; + /** Configuration of a PostgreSQL 14 1C server. */ + postgresqlConfig141c?: Postgresqlconfigset141c | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -374,7 +382,7 @@ export interface Host { * Name of the PostgreSQL host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. */ name: string; /** ID of the PostgreSQL host. The ID is assigned by MDB at creation time. */ @@ -561,8 +569,12 @@ export interface HostConfig { postgresqlConfig121c?: Postgresqlhostconfig121c | undefined; /** Configuration for a host with PostgreSQL 13 server deployed. */ postgresqlConfig13?: PostgresqlHostConfig13 | undefined; + /** Configuration for a host with PostgreSQL 13 1C server deployed. */ + postgresqlConfig131c?: Postgresqlhostconfig131c | undefined; /** Configuration for a host with PostgreSQL 14 server deployed. */ postgresqlConfig14?: PostgresqlHostConfig14 | undefined; + /** Configuration for a host with PostgreSQL 14 1C server deployed. */ + postgresqlConfig141c?: Postgresqlhostconfig141c | undefined; } export interface Service { @@ -678,7 +690,7 @@ export interface Access { /** Allow access for DataLens */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex Cloud management console. + * Allow SQL queries to the cluster databases from the management console. * * See [SQL queries in the management console](/docs/managed-postgresql/operations/web-sql-query) for more details. */ @@ -714,7 +726,7 @@ const baseCluster: object = { hostGroupIds: "", }; -export const Cluster = { +export const Cluster: MessageType = { $type: "yandex.cloud.mdb.postgresql.v1.Cluster" as const, encode( @@ -1210,7 +1222,7 @@ const baseClusterConfig: object = { version: "", }; -export const ClusterConfig = { +export const ClusterConfig: MessageType = { $type: "yandex.cloud.mdb.postgresql.v1.ClusterConfig" as const, encode( @@ -1268,12 +1280,24 @@ export const ClusterConfig = { writer.uint32(122).fork() ).ldelim(); } + if (message.postgresqlConfig131c !== undefined) { + Postgresqlconfigset131c.encode( + message.postgresqlConfig131c, + writer.uint32(146).fork() + ).ldelim(); + } if (message.postgresqlConfig14 !== undefined) { PostgresqlConfigSet14.encode( message.postgresqlConfig14, writer.uint32(130).fork() ).ldelim(); } + if (message.postgresqlConfig141c !== undefined) { + Postgresqlconfigset141c.encode( + message.postgresqlConfig141c, + writer.uint32(154).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -1374,12 +1398,24 @@ export const ClusterConfig = { reader.uint32() ); break; + case 18: + message.postgresqlConfig131c = Postgresqlconfigset131c.decode( + reader, + reader.uint32() + ); + break; case 16: message.postgresqlConfig14 = PostgresqlConfigSet14.decode( reader, reader.uint32() ); break; + case 19: + message.postgresqlConfig141c = Postgresqlconfigset141c.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -1467,11 +1503,21 @@ export const ClusterConfig = { object.postgresqlConfig_13 !== null ? 
PostgresqlConfigSet13.fromJSON(object.postgresqlConfig_13) : undefined; + message.postgresqlConfig131c = + object.postgresqlConfig_13_1c !== undefined && + object.postgresqlConfig_13_1c !== null + ? Postgresqlconfigset131c.fromJSON(object.postgresqlConfig_13_1c) + : undefined; message.postgresqlConfig14 = object.postgresqlConfig_14 !== undefined && object.postgresqlConfig_14 !== null ? PostgresqlConfigSet14.fromJSON(object.postgresqlConfig_14) : undefined; + message.postgresqlConfig141c = + object.postgresqlConfig_14_1c !== undefined && + object.postgresqlConfig_14_1c !== null + ? Postgresqlconfigset141c.fromJSON(object.postgresqlConfig_14_1c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -1541,10 +1587,18 @@ export const ClusterConfig = { (obj.postgresqlConfig_13 = message.postgresqlConfig13 ? PostgresqlConfigSet13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig131c !== undefined && + (obj.postgresqlConfig_13_1c = message.postgresqlConfig131c + ? Postgresqlconfigset131c.toJSON(message.postgresqlConfig131c) + : undefined); message.postgresqlConfig14 !== undefined && (obj.postgresqlConfig_14 = message.postgresqlConfig14 ? PostgresqlConfigSet14.toJSON(message.postgresqlConfig14) : undefined); + message.postgresqlConfig141c !== undefined && + (obj.postgresqlConfig_14_1c = message.postgresqlConfig141c + ? Postgresqlconfigset141c.toJSON(message.postgresqlConfig141c) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -1615,11 +1669,21 @@ export const ClusterConfig = { object.postgresqlConfig13 !== null ? PostgresqlConfigSet13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig131c = + object.postgresqlConfig131c !== undefined && + object.postgresqlConfig131c !== null + ? Postgresqlconfigset131c.fromPartial(object.postgresqlConfig131c) + : undefined; message.postgresqlConfig14 = object.postgresqlConfig14 !== undefined && object.postgresqlConfig14 !== null ? PostgresqlConfigSet14.fromPartial(object.postgresqlConfig14) : undefined; + message.postgresqlConfig141c = + object.postgresqlConfig141c !== undefined && + object.postgresqlConfig141c !== null + ? Postgresqlconfigset141c.fromPartial(object.postgresqlConfig141c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? 
ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -2029,12 +2093,24 @@ export const HostConfig = { writer.uint32(66).fork() ).ldelim(); } + if (message.postgresqlConfig131c !== undefined) { + Postgresqlhostconfig131c.encode( + message.postgresqlConfig131c, + writer.uint32(82).fork() + ).ldelim(); + } if (message.postgresqlConfig14 !== undefined) { PostgresqlHostConfig14.encode( message.postgresqlConfig14, writer.uint32(74).fork() ).ldelim(); } + if (message.postgresqlConfig141c !== undefined) { + Postgresqlhostconfig141c.encode( + message.postgresqlConfig141c, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -2093,12 +2169,24 @@ export const HostConfig = { reader.uint32() ); break; + case 10: + message.postgresqlConfig131c = Postgresqlhostconfig131c.decode( + reader, + reader.uint32() + ); + break; case 9: message.postgresqlConfig14 = PostgresqlHostConfig14.decode( reader, reader.uint32() ); break; + case 11: + message.postgresqlConfig141c = Postgresqlhostconfig141c.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -2149,11 +2237,21 @@ export const HostConfig = { object.postgresqlHostConfig_13 !== null ? PostgresqlHostConfig13.fromJSON(object.postgresqlHostConfig_13) : undefined; + message.postgresqlConfig131c = + object.postgresqlHostConfig_13_1c !== undefined && + object.postgresqlHostConfig_13_1c !== null + ? Postgresqlhostconfig131c.fromJSON(object.postgresqlHostConfig_13_1c) + : undefined; message.postgresqlConfig14 = object.postgresqlHostConfig_14 !== undefined && object.postgresqlHostConfig_14 !== null ? PostgresqlHostConfig14.fromJSON(object.postgresqlHostConfig_14) : undefined; + message.postgresqlConfig141c = + object.postgresqlHostConfig_14_1c !== undefined && + object.postgresqlHostConfig_14_1c !== null + ? Postgresqlhostconfig141c.fromJSON(object.postgresqlHostConfig_14_1c) + : undefined; return message; }, @@ -2191,10 +2289,18 @@ export const HostConfig = { (obj.postgresqlHostConfig_13 = message.postgresqlConfig13 ? PostgresqlHostConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig131c !== undefined && + (obj.postgresqlHostConfig_13_1c = message.postgresqlConfig131c + ? Postgresqlhostconfig131c.toJSON(message.postgresqlConfig131c) + : undefined); message.postgresqlConfig14 !== undefined && (obj.postgresqlHostConfig_14 = message.postgresqlConfig14 ? PostgresqlHostConfig14.toJSON(message.postgresqlConfig14) : undefined); + message.postgresqlConfig141c !== undefined && + (obj.postgresqlHostConfig_14_1c = message.postgresqlConfig141c + ? Postgresqlhostconfig141c.toJSON(message.postgresqlConfig141c) + : undefined); return obj; }, @@ -2242,11 +2348,21 @@ export const HostConfig = { object.postgresqlConfig13 !== null ? PostgresqlHostConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig131c = + object.postgresqlConfig131c !== undefined && + object.postgresqlConfig131c !== null + ? Postgresqlhostconfig131c.fromPartial(object.postgresqlConfig131c) + : undefined; message.postgresqlConfig14 = object.postgresqlConfig14 !== undefined && object.postgresqlConfig14 !== null ? PostgresqlHostConfig14.fromPartial(object.postgresqlConfig14) : undefined; + message.postgresqlConfig141c = + object.postgresqlConfig141c !== undefined && + object.postgresqlConfig141c !== null + ? 
Postgresqlhostconfig141c.fromPartial(object.postgresqlConfig141c) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts index cd485249..f892fa7b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts @@ -43,7 +43,9 @@ import { Postgresqlconfig111c } from "../../../../../yandex/cloud/mdb/postgresql import { PostgresqlConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12"; import { Postgresqlconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c"; import { PostgresqlConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13"; +import { Postgresqlconfig131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c"; import { PostgresqlConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; +import { Postgresqlconfig141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -52,7 +54,9 @@ import { Postgresqlhostconfig111c } from "../../../../../yandex/cloud/mdb/postgr import { PostgresqlHostConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12"; import { Postgresqlhostconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12_1c"; import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13"; +import { Postgresqlhostconfig131c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13_1c"; import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; +import { Postgresqlhostconfig141c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14_1c"; import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -81,7 +85,7 @@ export interface ListClustersRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; /** @@ -453,7 +457,7 @@ export interface ListClusterLogsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** Always return `next_page_token`, even if current page is empty. */ @@ -621,7 +625,7 @@ export interface ListClusterOperationsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -654,7 +658,7 @@ export interface ListClusterBackupsRequest { pageSize: number; /** * Page token. 
To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -687,7 +691,7 @@ export interface ListClusterHostsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -851,8 +855,12 @@ export interface ConfigSpec { postgresqlConfig121c?: Postgresqlconfig121c | undefined; /** Configuration for a PostgreSQL 13 cluster. */ postgresqlConfig13?: PostgresqlConfig13 | undefined; + /** Configuration for a PostgreSQL 13 1C cluster. */ + postgresqlConfig131c?: Postgresqlconfig131c | undefined; /** Configuration for a PostgreSQL 14 cluster. */ postgresqlConfig14?: PostgresqlConfig14 | undefined; + /** Configuration for a PostgreSQL 14 1C cluster. */ + postgresqlConfig141c?: Postgresqlconfig141c | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -887,8 +895,12 @@ export interface ConfigHostSpec { postgresqlConfig121c?: Postgresqlhostconfig121c | undefined; /** Configuration for a host with PostgreSQL 13 server deployed. */ postgresqlConfig13?: PostgresqlHostConfig13 | undefined; + /** Configuration for a host with PostgreSQL 13 1C server deployed. */ + postgresqlConfig131c?: Postgresqlhostconfig131c | undefined; /** Configuration for a host with PostgreSQL 14 server deployed. */ postgresqlConfig14?: PostgresqlHostConfig14 | undefined; + /** Configuration for a host with PostgreSQL 14 1C server deployed. */ + postgresqlConfig141c?: Postgresqlhostconfig141c | undefined; } const baseGetClusterRequest: object = { @@ -1060,7 +1072,7 @@ const baseListClustersResponse: object = { nextPageToken: "", }; -export const ListClustersResponse: MessageType = { +export const ListClustersResponse: MessageType = { $type: "yandex.cloud.mdb.postgresql.v1.ListClustersResponse" as const, encode( @@ -5598,12 +5610,24 @@ export const ConfigSpec = { writer.uint32(122).fork() ).ldelim(); } + if (message.postgresqlConfig131c !== undefined) { + Postgresqlconfig131c.encode( + message.postgresqlConfig131c, + writer.uint32(146).fork() + ).ldelim(); + } if (message.postgresqlConfig14 !== undefined) { PostgresqlConfig14.encode( message.postgresqlConfig14, writer.uint32(130).fork() ).ldelim(); } + if (message.postgresqlConfig141c !== undefined) { + Postgresqlconfig141c.encode( + message.postgresqlConfig141c, + writer.uint32(154).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -5704,12 +5728,24 @@ export const ConfigSpec = { reader.uint32() ); break; + case 18: + message.postgresqlConfig131c = Postgresqlconfig131c.decode( + reader, + reader.uint32() + ); + break; case 16: message.postgresqlConfig14 = PostgresqlConfig14.decode( reader, reader.uint32() ); break; + case 19: + message.postgresqlConfig141c = Postgresqlconfig141c.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -5797,11 +5833,21 @@ export const ConfigSpec = { object.postgresqlConfig_13 !== null ? 
PostgresqlConfig13.fromJSON(object.postgresqlConfig_13) : undefined; + message.postgresqlConfig131c = + object.postgresqlConfig_13_1c !== undefined && + object.postgresqlConfig_13_1c !== null + ? Postgresqlconfig131c.fromJSON(object.postgresqlConfig_13_1c) + : undefined; message.postgresqlConfig14 = object.postgresqlConfig_14 !== undefined && object.postgresqlConfig_14 !== null ? PostgresqlConfig14.fromJSON(object.postgresqlConfig_14) : undefined; + message.postgresqlConfig141c = + object.postgresqlConfig_14_1c !== undefined && + object.postgresqlConfig_14_1c !== null + ? Postgresqlconfig141c.fromJSON(object.postgresqlConfig_14_1c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -5871,10 +5917,18 @@ export const ConfigSpec = { (obj.postgresqlConfig_13 = message.postgresqlConfig13 ? PostgresqlConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig131c !== undefined && + (obj.postgresqlConfig_13_1c = message.postgresqlConfig131c + ? Postgresqlconfig131c.toJSON(message.postgresqlConfig131c) + : undefined); message.postgresqlConfig14 !== undefined && (obj.postgresqlConfig_14 = message.postgresqlConfig14 ? PostgresqlConfig14.toJSON(message.postgresqlConfig14) : undefined); + message.postgresqlConfig141c !== undefined && + (obj.postgresqlConfig_14_1c = message.postgresqlConfig141c + ? Postgresqlconfig141c.toJSON(message.postgresqlConfig141c) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -5945,11 +5999,21 @@ export const ConfigSpec = { object.postgresqlConfig13 !== null ? PostgresqlConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig131c = + object.postgresqlConfig131c !== undefined && + object.postgresqlConfig131c !== null + ? Postgresqlconfig131c.fromPartial(object.postgresqlConfig131c) + : undefined; message.postgresqlConfig14 = object.postgresqlConfig14 !== undefined && object.postgresqlConfig14 !== null ? PostgresqlConfig14.fromPartial(object.postgresqlConfig14) : undefined; + message.postgresqlConfig141c = + object.postgresqlConfig141c !== undefined && + object.postgresqlConfig141c !== null + ? Postgresqlconfig141c.fromPartial(object.postgresqlConfig141c) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? 
ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -6039,12 +6103,24 @@ export const ConfigHostSpec = { writer.uint32(66).fork() ).ldelim(); } + if (message.postgresqlConfig131c !== undefined) { + Postgresqlhostconfig131c.encode( + message.postgresqlConfig131c, + writer.uint32(82).fork() + ).ldelim(); + } if (message.postgresqlConfig14 !== undefined) { PostgresqlHostConfig14.encode( message.postgresqlConfig14, writer.uint32(74).fork() ).ldelim(); } + if (message.postgresqlConfig141c !== undefined) { + Postgresqlhostconfig141c.encode( + message.postgresqlConfig141c, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -6103,12 +6179,24 @@ export const ConfigHostSpec = { reader.uint32() ); break; + case 10: + message.postgresqlConfig131c = Postgresqlhostconfig131c.decode( + reader, + reader.uint32() + ); + break; case 9: message.postgresqlConfig14 = PostgresqlHostConfig14.decode( reader, reader.uint32() ); break; + case 11: + message.postgresqlConfig141c = Postgresqlhostconfig141c.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -6159,11 +6247,21 @@ export const ConfigHostSpec = { object.postgresqlHostConfig_13 !== null ? PostgresqlHostConfig13.fromJSON(object.postgresqlHostConfig_13) : undefined; + message.postgresqlConfig131c = + object.postgresqlHostConfig_13_1c !== undefined && + object.postgresqlHostConfig_13_1c !== null + ? Postgresqlhostconfig131c.fromJSON(object.postgresqlHostConfig_13_1c) + : undefined; message.postgresqlConfig14 = object.postgresqlHostConfig_14 !== undefined && object.postgresqlHostConfig_14 !== null ? PostgresqlHostConfig14.fromJSON(object.postgresqlHostConfig_14) : undefined; + message.postgresqlConfig141c = + object.postgresqlHostConfig_14_1c !== undefined && + object.postgresqlHostConfig_14_1c !== null + ? Postgresqlhostconfig141c.fromJSON(object.postgresqlHostConfig_14_1c) + : undefined; return message; }, @@ -6201,10 +6299,18 @@ export const ConfigHostSpec = { (obj.postgresqlHostConfig_13 = message.postgresqlConfig13 ? PostgresqlHostConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig131c !== undefined && + (obj.postgresqlHostConfig_13_1c = message.postgresqlConfig131c + ? Postgresqlhostconfig131c.toJSON(message.postgresqlConfig131c) + : undefined); message.postgresqlConfig14 !== undefined && (obj.postgresqlHostConfig_14 = message.postgresqlConfig14 ? PostgresqlHostConfig14.toJSON(message.postgresqlConfig14) : undefined); + message.postgresqlConfig141c !== undefined && + (obj.postgresqlHostConfig_14_1c = message.postgresqlConfig141c + ? Postgresqlhostconfig141c.toJSON(message.postgresqlConfig141c) + : undefined); return obj; }, @@ -6252,11 +6358,21 @@ export const ConfigHostSpec = { object.postgresqlConfig13 !== null ? PostgresqlHostConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig131c = + object.postgresqlConfig131c !== undefined && + object.postgresqlConfig131c !== null + ? Postgresqlhostconfig131c.fromPartial(object.postgresqlConfig131c) + : undefined; message.postgresqlConfig14 = object.postgresqlConfig14 !== undefined && object.postgresqlConfig14 !== null ? PostgresqlHostConfig14.fromPartial(object.postgresqlConfig14) : undefined; + message.postgresqlConfig141c = + object.postgresqlConfig141c !== undefined && + object.postgresqlConfig141c !== null + ? 
Postgresqlhostconfig141c.fromPartial(object.postgresqlConfig141c) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11.ts index b32731ad..29d1bcfb 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11.ts @@ -98,6 +98,102 @@ export interface PostgresqlHostConfig11 { effectiveCacheSize?: number; } +export enum PostgresqlHostConfig11_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig11_BackslashQuoteFromJSON( + object: any +): PostgresqlHostConfig11_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig11_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig11_BackslashQuoteToJSON( + object: PostgresqlHostConfig11_BackslashQuote +): string { + switch (object) { + case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig11_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig11_ByteaOutputFromJSON( + object: any +): PostgresqlHostConfig11_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig11_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig11_ByteaOutputToJSON( + object: PostgresqlHostConfig11_ByteaOutput +): string { + switch (object) { + case PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig11_ConstraintExclusion { CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, 
CONSTRAINT_EXCLUSION_ON = 1, @@ -194,6 +290,54 @@ export function postgresqlHostConfig11_ForceParallelModeToJSON( } } +export enum PostgresqlHostConfig11_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig11_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig11_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig11_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig11_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig11_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig11_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -290,54 +434,6 @@ export function postgresqlHostConfig11_LogLevelToJSON( } } -export enum PostgresqlHostConfig11_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig11_LogErrorVerbosityFromJSON( - object: any -): PostgresqlHostConfig11_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig11_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig11_LogErrorVerbosityToJSON( - object: PostgresqlHostConfig11_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlHostConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return 
"LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig11_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -446,48 +542,6 @@ export function postgresqlHostConfig11_TransactionIsolationToJSON( } } -export enum PostgresqlHostConfig11_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig11_ByteaOutputFromJSON( - object: any -): PostgresqlHostConfig11_ByteaOutput { - switch (object) { - case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; - case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_HEX; - case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig11_ByteaOutput.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig11_ByteaOutputToJSON( - object: PostgresqlHostConfig11_ByteaOutput -): string { - switch (object) { - case PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlHostConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig11_XmlBinary { XML_BINARY_UNSPECIFIED = 0, XML_BINARY_BASE64 = 1, @@ -572,60 +626,6 @@ export function postgresqlHostConfig11_XmlOptionToJSON( } } -export enum PostgresqlHostConfig11_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig11_BackslashQuoteFromJSON( - object: any -): PostgresqlHostConfig11_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig11_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig11_BackslashQuoteToJSON( - object: PostgresqlHostConfig11_BackslashQuote -): string { - switch (object) { - case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlHostConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - const basePostgresqlHostConfig11: object = { $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11", constraintExclusion: 0, diff --git 
a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11_1c.ts index edc97d70..d2f75117 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host11_1c.ts @@ -98,6 +98,102 @@ export interface Postgresqlhostconfig111c { effectiveCacheSize?: number; } +export enum Postgresqlhostconfig111c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig111c_BackslashQuoteFromJSON( + object: any +): Postgresqlhostconfig111c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig111c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig111c_BackslashQuoteToJSON( + object: Postgresqlhostconfig111c_BackslashQuote +): string { + switch (object) { + case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig111c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig111c_ByteaOutputFromJSON( + object: any +): Postgresqlhostconfig111c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig111c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig111c_ByteaOutputToJSON( + object: Postgresqlhostconfig111c_ByteaOutput +): string { + switch (object) { + case Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlhostconfig111c_ConstraintExclusion { CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, CONSTRAINT_EXCLUSION_ON = 1, @@ -194,6 +290,54 @@ 
export function postgresqlhostconfig111c_ForceParallelModeToJSON( } } +export enum Postgresqlhostconfig111c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig111c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlhostconfig111c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig111c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig111c_LogErrorVerbosityToJSON( + object: Postgresqlhostconfig111c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlhostconfig111c_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -290,54 +434,6 @@ export function postgresqlhostconfig111c_LogLevelToJSON( } } -export enum Postgresqlhostconfig111c_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlhostconfig111c_LogErrorVerbosityFromJSON( - object: any -): Postgresqlhostconfig111c_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlhostconfig111c_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlhostconfig111c_LogErrorVerbosityToJSON( - object: Postgresqlhostconfig111c_LogErrorVerbosity -): string { - switch (object) { - case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case Postgresqlhostconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - 
return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlhostconfig111c_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -446,48 +542,6 @@ export function postgresqlhostconfig111c_TransactionIsolationToJSON( } } -export enum Postgresqlhostconfig111c_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, - UNRECOGNIZED = -1, -} - -export function postgresqlhostconfig111c_ByteaOutputFromJSON( - object: any -): Postgresqlhostconfig111c_ByteaOutput { - switch (object) { - case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; - case 1: - case "BYTEA_OUTPUT_HEX": - return Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX; - case 2: - case "BYTEA_OUTPUT_ESCAPED": - return Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlhostconfig111c_ByteaOutput.UNRECOGNIZED; - } -} - -export function postgresqlhostconfig111c_ByteaOutputToJSON( - object: Postgresqlhostconfig111c_ByteaOutput -): string { - switch (object) { - case Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case Postgresqlhostconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlhostconfig111c_XmlBinary { XML_BINARY_UNSPECIFIED = 0, XML_BINARY_BASE64 = 1, @@ -572,60 +626,6 @@ export function postgresqlhostconfig111c_XmlOptionToJSON( } } -export enum Postgresqlhostconfig111c_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlhostconfig111c_BackslashQuoteFromJSON( - object: any -): Postgresqlhostconfig111c_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlhostconfig111c_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlhostconfig111c_BackslashQuoteToJSON( - object: Postgresqlhostconfig111c_BackslashQuote -): string { - switch (object) { - case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case Postgresqlhostconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - const basePostgresqlhostconfig111c: object = { $type: 
"yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig11_1C", constraintExclusion: 0, diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12.ts index 9f0b5819..b0bf9835 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12.ts @@ -98,6 +98,102 @@ export interface PostgresqlHostConfig12 { effectiveCacheSize?: number; } +export enum PostgresqlHostConfig12_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig12_BackslashQuoteFromJSON( + object: any +): PostgresqlHostConfig12_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig12_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig12_BackslashQuoteToJSON( + object: PostgresqlHostConfig12_BackslashQuote +): string { + switch (object) { + case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig12_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig12_ByteaOutputFromJSON( + object: any +): PostgresqlHostConfig12_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig12_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig12_ByteaOutputToJSON( + object: PostgresqlHostConfig12_ByteaOutput +): string { + switch (object) { + case PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig12_ConstraintExclusion { CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, 
CONSTRAINT_EXCLUSION_ON = 1, @@ -194,6 +290,54 @@ export function postgresqlHostConfig12_ForceParallelModeToJSON( } } +export enum PostgresqlHostConfig12_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig12_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig12_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig12_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig12_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig12_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig12_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -290,54 +434,6 @@ export function postgresqlHostConfig12_LogLevelToJSON( } } -export enum PostgresqlHostConfig12_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig12_LogErrorVerbosityFromJSON( - object: any -): PostgresqlHostConfig12_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig12_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig12_LogErrorVerbosityToJSON( - object: PostgresqlHostConfig12_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlHostConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return 
"LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig12_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -446,48 +542,6 @@ export function postgresqlHostConfig12_TransactionIsolationToJSON( } } -export enum PostgresqlHostConfig12_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig12_ByteaOutputFromJSON( - object: any -): PostgresqlHostConfig12_ByteaOutput { - switch (object) { - case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; - case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_HEX; - case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig12_ByteaOutput.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig12_ByteaOutputToJSON( - object: PostgresqlHostConfig12_ByteaOutput -): string { - switch (object) { - case PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlHostConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig12_XmlBinary { XML_BINARY_UNSPECIFIED = 0, XML_BINARY_BASE64 = 1, @@ -572,60 +626,6 @@ export function postgresqlHostConfig12_XmlOptionToJSON( } } -export enum PostgresqlHostConfig12_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig12_BackslashQuoteFromJSON( - object: any -): PostgresqlHostConfig12_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig12_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig12_BackslashQuoteToJSON( - object: PostgresqlHostConfig12_BackslashQuote -): string { - switch (object) { - case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlHostConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - const basePostgresqlHostConfig12: object = { $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig12", constraintExclusion: 0, diff --git 
a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12_1c.ts index a2d26549..f55cf657 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host12_1c.ts @@ -98,6 +98,102 @@ export interface Postgresqlhostconfig121c { effectiveCacheSize?: number; } +export enum Postgresqlhostconfig121c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig121c_BackslashQuoteFromJSON( + object: any +): Postgresqlhostconfig121c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig121c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig121c_BackslashQuoteToJSON( + object: Postgresqlhostconfig121c_BackslashQuote +): string { + switch (object) { + case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig121c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig121c_ByteaOutputFromJSON( + object: any +): Postgresqlhostconfig121c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig121c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig121c_ByteaOutputToJSON( + object: Postgresqlhostconfig121c_ByteaOutput +): string { + switch (object) { + case Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlhostconfig121c_ConstraintExclusion { CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, CONSTRAINT_EXCLUSION_ON = 1, @@ -194,6 +290,54 @@ 
export function postgresqlhostconfig121c_ForceParallelModeToJSON( } } +export enum Postgresqlhostconfig121c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig121c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlhostconfig121c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig121c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig121c_LogErrorVerbosityToJSON( + object: Postgresqlhostconfig121c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlhostconfig121c_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -290,54 +434,6 @@ export function postgresqlhostconfig121c_LogLevelToJSON( } } -export enum Postgresqlhostconfig121c_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlhostconfig121c_LogErrorVerbosityFromJSON( - object: any -): Postgresqlhostconfig121c_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlhostconfig121c_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlhostconfig121c_LogErrorVerbosityToJSON( - object: Postgresqlhostconfig121c_LogErrorVerbosity -): string { - switch (object) { - case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case Postgresqlhostconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - 
return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlhostconfig121c_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -446,48 +542,6 @@ export function postgresqlhostconfig121c_TransactionIsolationToJSON( } } -export enum Postgresqlhostconfig121c_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, - UNRECOGNIZED = -1, -} - -export function postgresqlhostconfig121c_ByteaOutputFromJSON( - object: any -): Postgresqlhostconfig121c_ByteaOutput { - switch (object) { - case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; - case 1: - case "BYTEA_OUTPUT_HEX": - return Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX; - case 2: - case "BYTEA_OUTPUT_ESCAPED": - return Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlhostconfig121c_ByteaOutput.UNRECOGNIZED; - } -} - -export function postgresqlhostconfig121c_ByteaOutputToJSON( - object: Postgresqlhostconfig121c_ByteaOutput -): string { - switch (object) { - case Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case Postgresqlhostconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlhostconfig121c_XmlBinary { XML_BINARY_UNSPECIFIED = 0, XML_BINARY_BASE64 = 1, @@ -572,60 +626,6 @@ export function postgresqlhostconfig121c_XmlOptionToJSON( } } -export enum Postgresqlhostconfig121c_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlhostconfig121c_BackslashQuoteFromJSON( - object: any -): Postgresqlhostconfig121c_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlhostconfig121c_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlhostconfig121c_BackslashQuoteToJSON( - object: Postgresqlhostconfig121c_BackslashQuote -): string { - switch (object) { - case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case Postgresqlhostconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - const basePostgresqlhostconfig121c: object = { $type: 
"yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig12_1C", constraintExclusion: 0, diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13.ts index 916ae530..e4e6b5e8 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13.ts @@ -98,6 +98,102 @@ export interface PostgresqlHostConfig13 { effectiveCacheSize?: number; } +export enum PostgresqlHostConfig13_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig13_BackslashQuoteFromJSON( + object: any +): PostgresqlHostConfig13_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig13_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig13_BackslashQuoteToJSON( + object: PostgresqlHostConfig13_BackslashQuote +): string { + switch (object) { + case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig13_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig13_ByteaOutputFromJSON( + object: any +): PostgresqlHostConfig13_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig13_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig13_ByteaOutputToJSON( + object: PostgresqlHostConfig13_ByteaOutput +): string { + switch (object) { + case PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig13_ConstraintExclusion { CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, 
CONSTRAINT_EXCLUSION_ON = 1, @@ -194,6 +290,54 @@ export function postgresqlHostConfig13_ForceParallelModeToJSON( } } +export enum PostgresqlHostConfig13_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig13_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig13_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig13_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig13_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig13_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig13_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -290,54 +434,6 @@ export function postgresqlHostConfig13_LogLevelToJSON( } } -export enum PostgresqlHostConfig13_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig13_LogErrorVerbosityFromJSON( - object: any -): PostgresqlHostConfig13_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig13_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig13_LogErrorVerbosityToJSON( - object: PostgresqlHostConfig13_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlHostConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return 
"LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig13_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -446,48 +542,6 @@ export function postgresqlHostConfig13_TransactionIsolationToJSON( } } -export enum PostgresqlHostConfig13_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig13_ByteaOutputFromJSON( - object: any -): PostgresqlHostConfig13_ByteaOutput { - switch (object) { - case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; - case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_HEX; - case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig13_ByteaOutput.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig13_ByteaOutputToJSON( - object: PostgresqlHostConfig13_ByteaOutput -): string { - switch (object) { - case PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlHostConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig13_XmlBinary { XML_BINARY_UNSPECIFIED = 0, XML_BINARY_BASE64 = 1, @@ -572,60 +626,6 @@ export function postgresqlHostConfig13_XmlOptionToJSON( } } -export enum PostgresqlHostConfig13_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig13_BackslashQuoteFromJSON( - object: any -): PostgresqlHostConfig13_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig13_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig13_BackslashQuoteToJSON( - object: PostgresqlHostConfig13_BackslashQuote -): string { - switch (object) { - case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlHostConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - const basePostgresqlHostConfig13: object = { $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig13", constraintExclusion: 0, diff --git 
a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13_1c.ts new file mode 100644 index 00000000..44ce596d --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host13_1c.ts @@ -0,0 +1,2062 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlhostconfig131c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig13_1C"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: Postgresqlhostconfig131c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: Postgresqlhostconfig131c_ForceParallelMode; + clientMinMessages: Postgresqlhostconfig131c_LogLevel; + logMinMessages: Postgresqlhostconfig131c_LogLevel; + logMinErrorStatement: Postgresqlhostconfig131c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlhostconfig131c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlhostconfig131c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlhostconfig131c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlhostconfig131c_ByteaOutput; + xmlbinary: Postgresqlhostconfig131c_XmlBinary; + xmloption: Postgresqlhostconfig131c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlhostconfig131c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + operatorPrecedenceWarning?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum Postgresqlhostconfig131c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_BackslashQuoteFromJSON( + object: any +): Postgresqlhostconfig131c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_BackslashQuoteToJSON( + object: Postgresqlhostconfig131c_BackslashQuote +): string { + switch (object) { + case Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlhostconfig131c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_ByteaOutputFromJSON( + object: any +): Postgresqlhostconfig131c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlhostconfig131c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlhostconfig131c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlhostconfig131c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_ByteaOutput.UNRECOGNIZED; + } +} + +export function 
postgresqlhostconfig131c_ByteaOutputToJSON( + object: Postgresqlhostconfig131c_ByteaOutput +): string { + switch (object) { + case Postgresqlhostconfig131c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlhostconfig131c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlhostconfig131c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_ConstraintExclusionFromJSON( + object: any +): Postgresqlhostconfig131c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_ConstraintExclusionToJSON( + object: Postgresqlhostconfig131c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlhostconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_ForceParallelModeFromJSON( + object: any +): Postgresqlhostconfig131c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_ForceParallelModeToJSON( + object: Postgresqlhostconfig131c_ForceParallelMode +): string { + switch (object) { + case Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return 
"FORCE_PARALLEL_MODE_ON"; + case Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlhostconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlhostconfig131c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_LogErrorVerbosityToJSON( + object: Postgresqlhostconfig131c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlhostconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_LogLevelFromJSON( + object: any +): Postgresqlhostconfig131c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return 
Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_LogLevelToJSON( + object: Postgresqlhostconfig131c_LogLevel +): string { + switch (object) { + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlhostconfig131c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_LogStatementFromJSON( + object: any +): Postgresqlhostconfig131c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_LogStatementToJSON( + object: Postgresqlhostconfig131c_LogStatement +): string { + switch (object) { + case Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlhostconfig131c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_TransactionIsolationFromJSON( + 
object: any +): Postgresqlhostconfig131c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_TransactionIsolationToJSON( + object: Postgresqlhostconfig131c_TransactionIsolation +): string { + switch (object) { + case Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlhostconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_XmlBinaryFromJSON( + object: any +): Postgresqlhostconfig131c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlhostconfig131c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlhostconfig131c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlhostconfig131c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_XmlBinaryToJSON( + object: Postgresqlhostconfig131c_XmlBinary +): string { + switch (object) { + case Postgresqlhostconfig131c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlhostconfig131c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlhostconfig131c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig131c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig131c_XmlOptionFromJSON( + object: any +): Postgresqlhostconfig131c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlhostconfig131c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlhostconfig131c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case 
"XML_OPTION_CONTENT": + return Postgresqlhostconfig131c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig131c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig131c_XmlOptionToJSON( + object: Postgresqlhostconfig131c_XmlOption +): string { + switch (object) { + case Postgresqlhostconfig131c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlhostconfig131c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlhostconfig131c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlhostconfig131c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig13_1C", + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const Postgresqlhostconfig131c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig13_1C" as const, + + encode( + message: Postgresqlhostconfig131c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(104).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! 
}, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.operatorPrecedenceWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.operatorPrecedenceWarning!, + }, + writer.uint32(354).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! 
}, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlhostconfig131c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePostgresqlhostconfig131c, + } as Postgresqlhostconfig131c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.forceParallelMode = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + 
reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.operatorPrecedenceWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + 
break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlhostconfig131c { + const message = { + ...basePostgresqlhostconfig131c, + } as Postgresqlhostconfig131c; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlhostconfig131c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? 
Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlhostconfig131c_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlhostconfig131c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlhostconfig131c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlhostconfig131c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlhostconfig131c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlhostconfig131c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlhostconfig131c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? 
postgresqlhostconfig131c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlhostconfig131c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlhostconfig131c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlhostconfig131c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.operatorPrecedenceWarning = + object.operatorPrecedenceWarning !== undefined && + object.operatorPrecedenceWarning !== null + ? Boolean(object.operatorPrecedenceWarning) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? 
Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? 
Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: Postgresqlhostconfig131c): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlhostconfig131c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlhostconfig131c_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlhostconfig131c_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlhostconfig131c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlhostconfig131c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlhostconfig131c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlhostconfig131c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlhostconfig131c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + 
(obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlhostconfig131c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlhostconfig131c_XmlBinaryToJSON( + message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlhostconfig131c_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlhostconfig131c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.operatorPrecedenceWarning !== undefined && + (obj.operatorPrecedenceWarning = message.operatorPrecedenceWarning); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = 
message.timezone);
+    message.effectiveIoConcurrency !== undefined &&
+      (obj.effectiveIoConcurrency = message.effectiveIoConcurrency);
+    message.effectiveCacheSize !== undefined &&
+      (obj.effectiveCacheSize = message.effectiveCacheSize);
+    return obj;
+  },
+
+  fromPartial<I extends Exact<DeepPartial<Postgresqlhostconfig131c>, I>>(
+    object: I
+  ): Postgresqlhostconfig131c {
+    const message = {
+      ...basePostgresqlhostconfig131c,
+    } as Postgresqlhostconfig131c;
+    message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined;
+    message.sharedBuffers = object.sharedBuffers ?? undefined;
+    message.tempBuffers = object.tempBuffers ?? undefined;
+    message.workMem = object.workMem ?? undefined;
+    message.tempFileLimit = object.tempFileLimit ?? undefined;
+    message.backendFlushAfter = object.backendFlushAfter ?? undefined;
+    message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined;
+    message.maxStandbyStreamingDelay =
+      object.maxStandbyStreamingDelay ?? undefined;
+    message.constraintExclusion = object.constraintExclusion ?? 0;
+    message.cursorTupleFraction = object.cursorTupleFraction ?? undefined;
+    message.fromCollapseLimit = object.fromCollapseLimit ?? undefined;
+    message.joinCollapseLimit = object.joinCollapseLimit ?? undefined;
+    message.forceParallelMode = object.forceParallelMode ?? 0;
+    message.clientMinMessages = object.clientMinMessages ?? 0;
+    message.logMinMessages = object.logMinMessages ?? 0;
+    message.logMinErrorStatement = object.logMinErrorStatement ?? 0;
+    message.logMinDurationStatement =
+      object.logMinDurationStatement ?? undefined;
+    message.logCheckpoints = object.logCheckpoints ?? undefined;
+    message.logConnections = object.logConnections ?? undefined;
+    message.logDisconnections = object.logDisconnections ?? undefined;
+    message.logDuration = object.logDuration ?? undefined;
+    message.logErrorVerbosity = object.logErrorVerbosity ?? 0;
+    message.logLockWaits = object.logLockWaits ?? undefined;
+    message.logStatement = object.logStatement ?? 0;
+    message.logTempFiles = object.logTempFiles ?? undefined;
+    message.searchPath = object.searchPath ?? "";
+    message.rowSecurity = object.rowSecurity ?? undefined;
+    message.defaultTransactionIsolation =
+      object.defaultTransactionIsolation ?? 0;
+    message.statementTimeout = object.statementTimeout ?? undefined;
+    message.lockTimeout = object.lockTimeout ?? undefined;
+    message.idleInTransactionSessionTimeout =
+      object.idleInTransactionSessionTimeout ?? undefined;
+    message.byteaOutput = object.byteaOutput ?? 0;
+    message.xmlbinary = object.xmlbinary ?? 0;
+    message.xmloption = object.xmloption ?? 0;
+    message.ginPendingListLimit = object.ginPendingListLimit ?? undefined;
+    message.deadlockTimeout = object.deadlockTimeout ?? undefined;
+    message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined;
+    message.maxPredLocksPerTransaction =
+      object.maxPredLocksPerTransaction ?? undefined;
+    message.arrayNulls = object.arrayNulls ?? undefined;
+    message.backslashQuote = object.backslashQuote ?? 0;
+    message.defaultWithOids = object.defaultWithOids ?? undefined;
+    message.escapeStringWarning = object.escapeStringWarning ?? undefined;
+    message.loCompatPrivileges = object.loCompatPrivileges ?? undefined;
+    message.operatorPrecedenceWarning =
+      object.operatorPrecedenceWarning ?? undefined;
+    message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined;
+    message.standardConformingStrings =
+      object.standardConformingStrings ?? undefined;
+    message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined;
+    message.transformNullEquals = object.transformNullEquals ?? undefined;
+    message.exitOnError = object.exitOnError ?? undefined;
+    message.seqPageCost = object.seqPageCost ?? undefined;
+    message.randomPageCost = object.randomPageCost ?? undefined;
+    message.enableBitmapscan = object.enableBitmapscan ?? undefined;
+    message.enableHashagg = object.enableHashagg ?? undefined;
+    message.enableHashjoin = object.enableHashjoin ?? undefined;
+    message.enableIndexscan = object.enableIndexscan ?? undefined;
+    message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined;
+    message.enableMaterial = object.enableMaterial ?? undefined;
+    message.enableMergejoin = object.enableMergejoin ?? undefined;
+    message.enableNestloop = object.enableNestloop ?? undefined;
+    message.enableSeqscan = object.enableSeqscan ?? undefined;
+    message.enableSort = object.enableSort ?? undefined;
+    message.enableTidscan = object.enableTidscan ?? undefined;
+    message.maxParallelWorkers = object.maxParallelWorkers ?? undefined;
+    message.maxParallelWorkersPerGather =
+      object.maxParallelWorkersPerGather ?? undefined;
+    message.timezone = object.timezone ?? "";
+    message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined;
+    message.effectiveCacheSize = object.effectiveCacheSize ?? undefined;
+    return message;
+  },
+};
+
+messageTypeRegistry.set(
+  Postgresqlhostconfig131c.$type,
+  Postgresqlhostconfig131c
+);
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+export type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Array<infer U>
+  ? Array<DeepPartial<U>>
+  : T extends ReadonlyArray<infer U>
+  ? ReadonlyArray<DeepPartial<U>>
+  : T extends {}
+  ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+  : Partial<T>;
+
+type KeysOfUnion<T> = T extends T ? keyof T : never;
+export type Exact<P, I extends P> = P extends Builtin
+  ? P
+  : P & { [K in keyof P]: Exact<P[K], I[K]> } & Record<
+      Exclude<keyof I, KeysOfUnion<P> | "$type">,
+      never
+    >;
+
+if (_m0.util.Long !== Long) {
+  _m0.util.Long = Long as any;
+  _m0.configure();
+}
diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts
index 72f96036..6feed32a 100644
--- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts
+++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts
@@ -97,6 +97,102 @@ export interface PostgresqlHostConfig14 {
   effectiveCacheSize?: number;
 }
 
+export enum PostgresqlHostConfig14_BackslashQuote {
+  BACKSLASH_QUOTE_UNSPECIFIED = 0,
+  BACKSLASH_QUOTE = 1,
+  BACKSLASH_QUOTE_ON = 2,
+  BACKSLASH_QUOTE_OFF = 3,
+  BACKSLASH_QUOTE_SAFE_ENCODING = 4,
+  UNRECOGNIZED = -1,
+}
+
+export function postgresqlHostConfig14_BackslashQuoteFromJSON(
+  object: any
+): PostgresqlHostConfig14_BackslashQuote {
+  switch (object) {
+    case 0:
+    case "BACKSLASH_QUOTE_UNSPECIFIED":
+      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED;
+    case 1:
+    case "BACKSLASH_QUOTE":
+      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE;
+    case 2:
+    case "BACKSLASH_QUOTE_ON":
+      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON;
+    case 3:
+    case "BACKSLASH_QUOTE_OFF":
+      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF;
+    case 4:
+    case "BACKSLASH_QUOTE_SAFE_ENCODING":
+      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING;
+    case -1:
+    case "UNRECOGNIZED":
+    default:
+      return PostgresqlHostConfig14_BackslashQuote.UNRECOGNIZED;
+  }
+}
+
+export function postgresqlHostConfig14_BackslashQuoteToJSON(
+  object: PostgresqlHostConfig14_BackslashQuote
+): string {
+  switch (object) {
+    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED:
+      return "BACKSLASH_QUOTE_UNSPECIFIED";
+    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE:
+      return "BACKSLASH_QUOTE";
+    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON:
+      return "BACKSLASH_QUOTE_ON";
+    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF:
+      return "BACKSLASH_QUOTE_OFF";
+    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING:
+      return "BACKSLASH_QUOTE_SAFE_ENCODING";
+    default:
+      return "UNKNOWN";
+  }
+}
+
+export enum PostgresqlHostConfig14_ByteaOutput {
+  BYTEA_OUTPUT_UNSPECIFIED = 0,
+  BYTEA_OUTPUT_HEX = 1,
+  BYTEA_OUTPUT_ESCAPED = 2,
+  UNRECOGNIZED = -1,
+}
+
+export function postgresqlHostConfig14_ByteaOutputFromJSON(
+  object: any
+): PostgresqlHostConfig14_ByteaOutput {
+  switch (object) {
+    case 0:
+    case "BYTEA_OUTPUT_UNSPECIFIED":
+      return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED;
+    case 1:
+    case "BYTEA_OUTPUT_HEX":
+      return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX;
+    case 2:
+    case "BYTEA_OUTPUT_ESCAPED":
+      return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED;
+    case -1:
+    case "UNRECOGNIZED":
+    default:
+      return PostgresqlHostConfig14_ByteaOutput.UNRECOGNIZED;
+  }
+}
+
+export function postgresqlHostConfig14_ByteaOutputToJSON(
+  object: PostgresqlHostConfig14_ByteaOutput
+): string {
+  switch (object) {
+    case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED:
+      return "BYTEA_OUTPUT_UNSPECIFIED";
+    case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX:
+      return "BYTEA_OUTPUT_HEX";
+    case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED:
+      return "BYTEA_OUTPUT_ESCAPED";
+    default:
+      return "UNKNOWN";
+  }
+}
+
 export enum
PostgresqlHostConfig14_ConstraintExclusion { CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, CONSTRAINT_EXCLUSION_ON = 1, @@ -193,6 +289,54 @@ export function postgresqlHostConfig14_ForceParallelModeToJSON( } } +export enum PostgresqlHostConfig14_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig14_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig14_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlHostConfig14_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -289,54 +433,6 @@ export function postgresqlHostConfig14_LogLevelToJSON( } } -export enum PostgresqlHostConfig14_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig14_LogErrorVerbosityFromJSON( - object: any -): PostgresqlHostConfig14_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig14_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig14_LogErrorVerbosityToJSON( - object: PostgresqlHostConfig14_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case 
PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig14_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -445,48 +541,6 @@ export function postgresqlHostConfig14_TransactionIsolationToJSON( } } -export enum PostgresqlHostConfig14_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig14_ByteaOutputFromJSON( - object: any -): PostgresqlHostConfig14_ByteaOutput { - switch (object) { - case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; - case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX; - case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig14_ByteaOutput.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig14_ByteaOutputToJSON( - object: PostgresqlHostConfig14_ByteaOutput -): string { - switch (object) { - case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlHostConfig14_XmlBinary { XML_BINARY_UNSPECIFIED = 0, XML_BINARY_BASE64 = 1, @@ -571,60 +625,6 @@ export function postgresqlHostConfig14_XmlOptionToJSON( } } -export enum PostgresqlHostConfig14_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlHostConfig14_BackslashQuoteFromJSON( - object: any -): PostgresqlHostConfig14_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlHostConfig14_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlHostConfig14_BackslashQuoteToJSON( - object: PostgresqlHostConfig14_BackslashQuote -): string { - switch (object) { - case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - const basePostgresqlHostConfig14: object = { $type: 
"yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14", constraintExclusion: 0, diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14_1c.ts new file mode 100644 index 00000000..5e3f2955 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14_1c.ts @@ -0,0 +1,2037 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlhostconfig141c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14_1C"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: Postgresqlhostconfig141c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: Postgresqlhostconfig141c_ForceParallelMode; + clientMinMessages: Postgresqlhostconfig141c_LogLevel; + logMinMessages: Postgresqlhostconfig141c_LogLevel; + logMinErrorStatement: Postgresqlhostconfig141c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlhostconfig141c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlhostconfig141c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlhostconfig141c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlhostconfig141c_ByteaOutput; + xmlbinary: Postgresqlhostconfig141c_XmlBinary; + xmloption: Postgresqlhostconfig141c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlhostconfig141c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum Postgresqlhostconfig141c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_BackslashQuoteFromJSON( + object: any +): Postgresqlhostconfig141c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_BackslashQuoteToJSON( + object: Postgresqlhostconfig141c_BackslashQuote +): string { + switch (object) { + case Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlhostconfig141c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_ByteaOutputFromJSON( + object: any +): Postgresqlhostconfig141c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlhostconfig141c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlhostconfig141c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlhostconfig141c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_ByteaOutputToJSON( + 
object: Postgresqlhostconfig141c_ByteaOutput +): string { + switch (object) { + case Postgresqlhostconfig141c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlhostconfig141c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlhostconfig141c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_ConstraintExclusionFromJSON( + object: any +): Postgresqlhostconfig141c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_ConstraintExclusionToJSON( + object: Postgresqlhostconfig141c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlhostconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_ForceParallelModeFromJSON( + object: any +): Postgresqlhostconfig141c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_ForceParallelModeToJSON( + object: Postgresqlhostconfig141c_ForceParallelMode +): string { + switch (object) { + case Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case 
Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlhostconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlhostconfig141c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_LogErrorVerbosityToJSON( + object: Postgresqlhostconfig141c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlhostconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_LogLevelFromJSON( + object: any +): Postgresqlhostconfig141c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case 
"LOG_LEVEL_PANIC": + return Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_LogLevelToJSON( + object: Postgresqlhostconfig141c_LogLevel +): string { + switch (object) { + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlhostconfig141c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_LogStatementFromJSON( + object: any +): Postgresqlhostconfig141c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_LogStatementToJSON( + object: Postgresqlhostconfig141c_LogStatement +): string { + switch (object) { + case Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlhostconfig141c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_TransactionIsolationFromJSON( + object: any +): Postgresqlhostconfig141c_TransactionIsolation { + switch 
(object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_TransactionIsolationToJSON( + object: Postgresqlhostconfig141c_TransactionIsolation +): string { + switch (object) { + case Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlhostconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_XmlBinaryFromJSON( + object: any +): Postgresqlhostconfig141c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlhostconfig141c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlhostconfig141c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlhostconfig141c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_XmlBinaryToJSON( + object: Postgresqlhostconfig141c_XmlBinary +): string { + switch (object) { + case Postgresqlhostconfig141c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlhostconfig141c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlhostconfig141c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlhostconfig141c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlhostconfig141c_XmlOptionFromJSON( + object: any +): Postgresqlhostconfig141c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlhostconfig141c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlhostconfig141c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlhostconfig141c_XmlOption.XML_OPTION_CONTENT; + 
case -1: + case "UNRECOGNIZED": + default: + return Postgresqlhostconfig141c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlhostconfig141c_XmlOptionToJSON( + object: Postgresqlhostconfig141c_XmlOption +): string { + switch (object) { + case Postgresqlhostconfig141c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlhostconfig141c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlhostconfig141c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlhostconfig141c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14_1C", + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const Postgresqlhostconfig141c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14_1C" as const, + + encode( + message: Postgresqlhostconfig141c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(104).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! 
}, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlhostconfig141c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePostgresqlhostconfig141c, + } as Postgresqlhostconfig141c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.forceParallelMode = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections 
= BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlhostconfig141c { + const message = { + ...basePostgresqlhostconfig141c, + } as Postgresqlhostconfig141c; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlhostconfig141c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? 
postgresqlhostconfig141c_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlhostconfig141c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlhostconfig141c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlhostconfig141c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlhostconfig141c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlhostconfig141c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlhostconfig141c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlhostconfig141c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlhostconfig141c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? 
postgresqlhostconfig141c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlhostconfig141c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? 
Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: Postgresqlhostconfig141c): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlhostconfig141c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlhostconfig141c_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlhostconfig141c_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlhostconfig141c_LogLevelToJSON( + message.logMinMessages + )); + 
message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlhostconfig141c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlhostconfig141c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlhostconfig141c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlhostconfig141c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlhostconfig141c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlhostconfig141c_XmlBinaryToJSON( + message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlhostconfig141c_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlhostconfig141c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = 
message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlhostconfig141c { + const message = { + ...basePostgresqlhostconfig141c, + } as Postgresqlhostconfig141c; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 
0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? ""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Postgresqlhostconfig141c.$type, + Postgresqlhostconfig141c +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts index 5545a401..54f12f79 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts @@ -154,6 +154,16 @@ export interface PostgresqlConfig10 { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + /** in bytes. */ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } export enum PostgresqlConfig10_WalLevel { @@ -354,6 +364,54 @@ export function postgresqlConfig10_ForceParallelModeToJSON( } } +export enum PostgresqlConfig10_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig10_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig10_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig10_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig10_LogErrorVerbosityToJSON( + object: PostgresqlConfig10_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlConfig10_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -450,54 +508,6 @@ export function postgresqlConfig10_LogLevelToJSON( } } -export enum PostgresqlConfig10_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig10_LogErrorVerbosityFromJSON( - object: any -): PostgresqlConfig10_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - 
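The postgresql10.ts hunks extend the PostgresqlConfig10 interface with max_stack_depth and the GEQO planner settings (geqo, geqoThreshold, geqoEffort, geqoSeed), and, further below, add PG_CRON and PGLOGICAL to the shared_preload_libraries enum. A minimal sketch of how these new optional fields could be populated through the generated helpers; the import path and the example values are assumptions for illustration, and the fromPartial/toJSON methods are assumed to follow the same pattern this patch shows for the other config messages:

// Import path is an assumption based on the generated file layout in this diff.
import {
  PostgresqlConfig10,
  PostgresqlConfig10_SharedPreloadLibraries,
  postgresqlConfig10_SharedPreloadLibrariesToJSON,
} from './generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10';

// All of the new fields are optional wrapper values; leaving them out keeps the server defaults.
const config = PostgresqlConfig10.fromPartial({
  maxStackDepth: 2 * 1024 * 1024, // in bytes, illustrative value
  geqo: true,                      // Genetic Query Optimizer enabled
  geqoThreshold: 12,
  geqoEffort: 5,
  geqoSeed: 0.5,
});

console.log(PostgresqlConfig10.toJSON(config));

// The new preload-library value round-trips through the generated converter.
console.log(
  postgresqlConfig10_SharedPreloadLibrariesToJSON(
    PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON
  )
); // "SHARED_PRELOAD_LIBRARIES_PG_CRON"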
case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig10_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlConfig10_LogErrorVerbosityToJSON( - object: PostgresqlConfig10_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlConfig10_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlConfig10_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -846,6 +856,8 @@ export enum PostgresqlConfig10_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, UNRECOGNIZED = -1, } @@ -868,6 +880,12 @@ export function postgresqlConfig10_SharedPreloadLibrariesFromJSON( case 4: case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": return PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; case -1: case "UNRECOGNIZED": default: @@ -889,6 +907,10 @@ export function postgresqlConfig10_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; case PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig10_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; default: return "UNKNOWN"; } @@ -1779,6 +1801,36 @@ export const PostgresqlConfig10 = { writer.uint32(938).fork() ).ldelim(); } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! 
}, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2412,6 +2464,27 @@ export const PostgresqlConfig10 = { reader.uint32() ).value; break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -2961,6 +3034,26 @@ export const PostgresqlConfig10 = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3225,6 +3318,13 @@ export const PostgresqlConfig10 = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3369,6 +3469,11 @@ export const PostgresqlConfig10 = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts index 430d62ab..ec292611 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts @@ -156,6 +156,16 @@ export interface Postgresqlconfig101c { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + /** in bytes. 
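The encoder and decoder added above agree through the usual protobuf tag arithmetic: a tag is the field number shifted left by three bits, ORed with the wire type (2 for the length-delimited Int64Value/BoolValue/DoubleValue wrappers used here). That is why the encoder writes writer.uint32(1202) for max_stack_depth while the decoder switches on case 150, and likewise for fields 152 through 158. A small check of the numbers used in this diff; the helper below is only for illustration:

// wireType 2 = length-delimited, used for the wrapper messages in these hunks.
const tag = (fieldNumber: number, wireType = 2): number => (fieldNumber << 3) | wireType;

console.log(tag(150)); // 1202 -> maxStackDepth
console.log(tag(152)); // 1218 -> geqo
console.log(tag(153)); // 1226 -> geqoThreshold
console.log(tag(154)); // 1234 -> geqoEffort
console.log(tag(158)); // 1266 -> geqoSeed

// The decoder recovers the field number with (tag >>> 3) and the wire type with (tag & 7),
// which is what reader.skipType(tag & 7) relies on for unknown fields.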
*/ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } export enum Postgresqlconfig101c_WalLevel { @@ -356,6 +366,54 @@ export function postgresqlconfig101c_ForceParallelModeToJSON( } } +export enum Postgresqlconfig101c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig101c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig101c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig101c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlconfig101c_LogErrorVerbosityToJSON( + object: Postgresqlconfig101c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlconfig101c_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -452,54 +510,6 @@ export function postgresqlconfig101c_LogLevelToJSON( } } -export enum Postgresqlconfig101c_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlconfig101c_LogErrorVerbosityFromJSON( - object: any -): Postgresqlconfig101c_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlconfig101c_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlconfig101c_LogErrorVerbosityToJSON( - object: Postgresqlconfig101c_LogErrorVerbosity -): string { - switch (object) { - case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return 
"LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case Postgresqlconfig101c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlconfig101c_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -848,6 +858,8 @@ export enum Postgresqlconfig101c_SharedPreloadLibraries { SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, UNRECOGNIZED = -1, } @@ -870,6 +882,12 @@ export function postgresqlconfig101c_SharedPreloadLibrariesFromJSON( case 4: case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": return Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; case -1: case "UNRECOGNIZED": default: @@ -891,6 +909,10 @@ export function postgresqlconfig101c_SharedPreloadLibrariesToJSON( return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; case Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig101c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; default: return "UNKNOWN"; } @@ -1799,6 +1821,36 @@ export const Postgresqlconfig101c = { writer.uint32(954).fork() ).ldelim(); } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! 
}, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2447,6 +2499,27 @@ export const Postgresqlconfig101c = { reader.uint32() ).value; break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -3012,6 +3085,26 @@ export const Postgresqlconfig101c = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3281,6 +3374,13 @@ export const Postgresqlconfig101c = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3427,6 +3527,11 @@ export const Postgresqlconfig101c = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts index bda39416..5dccefc6 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts @@ -162,105 +162,109 @@ export interface PostgresqlConfig11 { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + /** in bytes. 
*/ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } -export enum PostgresqlConfig11_WalLevel { - WAL_LEVEL_UNSPECIFIED = 0, - WAL_LEVEL_REPLICA = 1, - WAL_LEVEL_LOGICAL = 2, +export enum PostgresqlConfig11_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig11_WalLevelFromJSON( +export function postgresqlConfig11_BackslashQuoteFromJSON( object: any -): PostgresqlConfig11_WalLevel { +): PostgresqlConfig11_BackslashQuote { switch (object) { case 0: - case "WAL_LEVEL_UNSPECIFIED": - return PostgresqlConfig11_WalLevel.WAL_LEVEL_UNSPECIFIED; + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; case 1: - case "WAL_LEVEL_REPLICA": - return PostgresqlConfig11_WalLevel.WAL_LEVEL_REPLICA; + case "BACKSLASH_QUOTE": + return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE; case 2: - case "WAL_LEVEL_LOGICAL": - return PostgresqlConfig11_WalLevel.WAL_LEVEL_LOGICAL; + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig11_WalLevel.UNRECOGNIZED; + return PostgresqlConfig11_BackslashQuote.UNRECOGNIZED; } } -export function postgresqlConfig11_WalLevelToJSON( - object: PostgresqlConfig11_WalLevel +export function postgresqlConfig11_BackslashQuoteToJSON( + object: PostgresqlConfig11_BackslashQuote ): string { switch (object) { - case PostgresqlConfig11_WalLevel.WAL_LEVEL_UNSPECIFIED: - return "WAL_LEVEL_UNSPECIFIED"; - case PostgresqlConfig11_WalLevel.WAL_LEVEL_REPLICA: - return "WAL_LEVEL_REPLICA"; - case PostgresqlConfig11_WalLevel.WAL_LEVEL_LOGICAL: - return "WAL_LEVEL_LOGICAL"; + case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig11_SynchronousCommit { - SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, - SYNCHRONOUS_COMMIT_ON = 1, - SYNCHRONOUS_COMMIT_OFF = 2, - SYNCHRONOUS_COMMIT_LOCAL = 3, - SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, - SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, +export enum PostgresqlConfig11_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig11_SynchronousCommitFromJSON( +export function postgresqlConfig11_ByteaOutputFromJSON( object: any -): PostgresqlConfig11_SynchronousCommit { +): PostgresqlConfig11_ByteaOutput { switch (object) { case 0: - case 
"SYNCHRONOUS_COMMIT_UNSPECIFIED": - return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; case 1: - case "SYNCHRONOUS_COMMIT_ON": - return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_HEX; case 2: - case "SYNCHRONOUS_COMMIT_OFF": - return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; - case 3: - case "SYNCHRONOUS_COMMIT_LOCAL": - return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; - case 4: - case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": - return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; - case 5: - case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": - return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig11_SynchronousCommit.UNRECOGNIZED; + return PostgresqlConfig11_ByteaOutput.UNRECOGNIZED; } } -export function postgresqlConfig11_SynchronousCommitToJSON( - object: PostgresqlConfig11_SynchronousCommit +export function postgresqlConfig11_ByteaOutputToJSON( + object: PostgresqlConfig11_ByteaOutput ): string { switch (object) { - case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: - return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; - case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: - return "SYNCHRONOUS_COMMIT_ON"; - case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: - return "SYNCHRONOUS_COMMIT_OFF"; - case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: - return "SYNCHRONOUS_COMMIT_LOCAL"; - case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: - return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; - case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: - return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + case PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; default: return "UNKNOWN"; } @@ -362,6 +366,54 @@ export function postgresqlConfig11_ForceParallelModeToJSON( } } +export enum PostgresqlConfig11_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig11_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig11_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig11_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig11_LogErrorVerbosityToJSON( + object: PostgresqlConfig11_LogErrorVerbosity +): string { + switch (object) 
{ + case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlConfig11_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -458,54 +510,6 @@ export function postgresqlConfig11_LogLevelToJSON( } } -export enum PostgresqlConfig11_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig11_LogErrorVerbosityFromJSON( - object: any -): PostgresqlConfig11_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig11_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlConfig11_LogErrorVerbosityToJSON( - object: PostgresqlConfig11_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlConfig11_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -560,6 +564,186 @@ export function postgresqlConfig11_LogStatementToJSON( } } +export enum PostgresqlConfig11_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig11_PgHintPlanDebugPrintFromJSON( + object: any +): PostgresqlConfig11_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + 
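Every enum added or relocated in this file comes with a FromJSON/ToJSON pair like the ones above: FromJSON accepts either the numeric value or the string name and falls back to UNRECOGNIZED for anything else, while ToJSON maps known members back to their canonical names and everything else to "UNKNOWN". A short sketch using the PostgresqlConfig11 log_error_verbosity converters shown above; the import path is an assumption based on the diff's file layout:

import {
  PostgresqlConfig11_LogErrorVerbosity,
  postgresqlConfig11_LogErrorVerbosityFromJSON,
  postgresqlConfig11_LogErrorVerbosityToJSON,
} from './generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11';

// Both spellings decode to the same enum member.
console.log(postgresqlConfig11_LogErrorVerbosityFromJSON(2));                             // LOG_ERROR_VERBOSITY_DEFAULT
console.log(postgresqlConfig11_LogErrorVerbosityFromJSON('LOG_ERROR_VERBOSITY_DEFAULT')); // LOG_ERROR_VERBOSITY_DEFAULT

// Unexpected input is preserved as UNRECOGNIZED (-1) instead of throwing.
console.log(postgresqlConfig11_LogErrorVerbosityFromJSON('verbose'));                     // UNRECOGNIZED

// ToJSON restores the canonical string name.
console.log(
  postgresqlConfig11_LogErrorVerbosityToJSON(
    PostgresqlConfig11_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE
  )
); // "LOG_ERROR_VERBOSITY_VERBOSE"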
case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig11_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlConfig11_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig11_PgHintPlanDebugPrint +): string { + switch (object) { + case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig11_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig11_SharedPreloadLibrariesFromJSON( + object: any +): PostgresqlConfig11_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig11_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlConfig11_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig11_SharedPreloadLibraries +): string { + switch (object) { + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return 
"SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig11_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig11_SynchronousCommitFromJSON( + object: any +): PostgresqlConfig11_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig11_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlConfig11_SynchronousCommitToJSON( + object: PostgresqlConfig11_SynchronousCommit +): string { + switch (object) { + case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig11_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlConfig11_TransactionIsolation { TRANSACTION_ISOLATION_UNSPECIFIED = 0, TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, @@ -614,43 +798,43 @@ export function postgresqlConfig11_TransactionIsolationToJSON( } } -export enum PostgresqlConfig11_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, +export enum PostgresqlConfig11_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig11_ByteaOutputFromJSON( +export function postgresqlConfig11_WalLevelFromJSON( object: any -): PostgresqlConfig11_ByteaOutput { +): PostgresqlConfig11_WalLevel { switch (object) { case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig11_WalLevel.WAL_LEVEL_UNSPECIFIED; case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_HEX; + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig11_WalLevel.WAL_LEVEL_REPLICA; case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case "WAL_LEVEL_LOGICAL": + 
return PostgresqlConfig11_WalLevel.WAL_LEVEL_LOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig11_ByteaOutput.UNRECOGNIZED; + return PostgresqlConfig11_WalLevel.UNRECOGNIZED; } } -export function postgresqlConfig11_ByteaOutputToJSON( - object: PostgresqlConfig11_ByteaOutput +export function postgresqlConfig11_WalLevelToJSON( + object: PostgresqlConfig11_WalLevel ): string { switch (object) { - case PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlConfig11_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; + case PostgresqlConfig11_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case PostgresqlConfig11_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig11_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; default: return "UNKNOWN"; } @@ -740,168 +924,6 @@ export function postgresqlConfig11_XmlOptionToJSON( } } -export enum PostgresqlConfig11_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig11_BackslashQuoteFromJSON( - object: any -): PostgresqlConfig11_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig11_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlConfig11_BackslashQuoteToJSON( - object: PostgresqlConfig11_BackslashQuote -): string { - switch (object) { - case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlConfig11_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - -export enum PostgresqlConfig11_PgHintPlanDebugPrint { - PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, - PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, - PG_HINT_PLAN_DEBUG_PRINT_ON = 2, - PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, - PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig11_PgHintPlanDebugPrintFromJSON( - object: any -): PostgresqlConfig11_PgHintPlanDebugPrint { - switch (object) { - case 0: - case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": - return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; - case 1: - case "PG_HINT_PLAN_DEBUG_PRINT_OFF": - return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; - case 2: - case "PG_HINT_PLAN_DEBUG_PRINT_ON": - return 
PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; - case 3: - case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": - return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; - case 4: - case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": - return PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig11_PgHintPlanDebugPrint.UNRECOGNIZED; - } -} - -export function postgresqlConfig11_PgHintPlanDebugPrintToJSON( - object: PostgresqlConfig11_PgHintPlanDebugPrint -): string { - switch (object) { - case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: - return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; - case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: - return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; - case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: - return "PG_HINT_PLAN_DEBUG_PRINT_ON"; - case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: - return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; - case PostgresqlConfig11_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: - return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; - default: - return "UNKNOWN"; - } -} - -export enum PostgresqlConfig11_SharedPreloadLibraries { - SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, - SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, - SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, - SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, - SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig11_SharedPreloadLibrariesFromJSON( - object: any -): PostgresqlConfig11_SharedPreloadLibraries { - switch (object) { - case 0: - case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": - return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; - case 1: - case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": - return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; - case 2: - case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": - return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; - case 3: - case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": - return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; - case 4: - case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": - return PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig11_SharedPreloadLibraries.UNRECOGNIZED; - } -} - -export function postgresqlConfig11_SharedPreloadLibrariesToJSON( - object: PostgresqlConfig11_SharedPreloadLibraries -): string { - switch (object) { - case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: - return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; - case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: - return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; - case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: - return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; - case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: - return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; - case PostgresqlConfig11_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: - return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; - default: - return "UNKNOWN"; - } -} - export interface PostgresqlConfigSet11 { $type: 
"yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet11"; /** @@ -1856,6 +1878,36 @@ export const PostgresqlConfig11 = { writer.uint32(1010).fork() ).ldelim(); } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2534,6 +2586,27 @@ export const PostgresqlConfig11 = { reader.uint32() ).value; break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -3122,6 +3195,26 @@ export const PostgresqlConfig11 = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3403,6 +3496,13 @@ export const PostgresqlConfig11 = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3560,6 +3660,11 @@ export const PostgresqlConfig11 = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? 
undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts index f8e82ee5..31d5cc74 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts @@ -114,6 +114,7 @@ export interface Postgresqlconfig111c { /** in milliseconds. */ archiveTimeout?: number; trackActivityQuerySize?: number; + onlineAnalyzeEnable?: boolean; enableBitmapscan?: boolean; enableHashagg?: boolean; enableHashjoin?: boolean; @@ -162,105 +163,110 @@ export interface Postgresqlconfig111c { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + plantunerFixEmptyTable?: boolean; + /** in bytes. */ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } -export enum Postgresqlconfig111c_WalLevel { - WAL_LEVEL_UNSPECIFIED = 0, - WAL_LEVEL_REPLICA = 1, - WAL_LEVEL_LOGICAL = 2, +export enum Postgresqlconfig111c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, UNRECOGNIZED = -1, } -export function postgresqlconfig111c_WalLevelFromJSON( +export function postgresqlconfig111c_BackslashQuoteFromJSON( object: any -): Postgresqlconfig111c_WalLevel { +): Postgresqlconfig111c_BackslashQuote { switch (object) { case 0: - case "WAL_LEVEL_UNSPECIFIED": - return Postgresqlconfig111c_WalLevel.WAL_LEVEL_UNSPECIFIED; + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; case 1: - case "WAL_LEVEL_REPLICA": - return Postgresqlconfig111c_WalLevel.WAL_LEVEL_REPLICA; + case "BACKSLASH_QUOTE": + return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE; case 2: - case "WAL_LEVEL_LOGICAL": - return Postgresqlconfig111c_WalLevel.WAL_LEVEL_LOGICAL; + case "BACKSLASH_QUOTE_ON": + return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig111c_WalLevel.UNRECOGNIZED; + return Postgresqlconfig111c_BackslashQuote.UNRECOGNIZED; } } -export function postgresqlconfig111c_WalLevelToJSON( - object: Postgresqlconfig111c_WalLevel +export function postgresqlconfig111c_BackslashQuoteToJSON( + object: Postgresqlconfig111c_BackslashQuote ): string { switch (object) { - case Postgresqlconfig111c_WalLevel.WAL_LEVEL_UNSPECIFIED: - return "WAL_LEVEL_UNSPECIFIED"; - case Postgresqlconfig111c_WalLevel.WAL_LEVEL_REPLICA: - return "WAL_LEVEL_REPLICA"; - case Postgresqlconfig111c_WalLevel.WAL_LEVEL_LOGICAL: - return "WAL_LEVEL_LOGICAL"; + case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case 
Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig111c_SynchronousCommit { - SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, - SYNCHRONOUS_COMMIT_ON = 1, - SYNCHRONOUS_COMMIT_OFF = 2, - SYNCHRONOUS_COMMIT_LOCAL = 3, - SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, - SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, +export enum Postgresqlconfig111c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, UNRECOGNIZED = -1, } -export function postgresqlconfig111c_SynchronousCommitFromJSON( +export function postgresqlconfig111c_ByteaOutputFromJSON( object: any -): Postgresqlconfig111c_SynchronousCommit { +): Postgresqlconfig111c_ByteaOutput { switch (object) { case 0: - case "SYNCHRONOUS_COMMIT_UNSPECIFIED": - return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; case 1: - case "SYNCHRONOUS_COMMIT_ON": - return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case "BYTEA_OUTPUT_HEX": + return Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX; case 2: - case "SYNCHRONOUS_COMMIT_OFF": - return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; - case 3: - case "SYNCHRONOUS_COMMIT_LOCAL": - return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; - case 4: - case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": - return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; - case 5: - case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": - return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig111c_SynchronousCommit.UNRECOGNIZED; + return Postgresqlconfig111c_ByteaOutput.UNRECOGNIZED; } } -export function postgresqlconfig111c_SynchronousCommitToJSON( - object: Postgresqlconfig111c_SynchronousCommit +export function postgresqlconfig111c_ByteaOutputToJSON( + object: Postgresqlconfig111c_ByteaOutput ): string { switch (object) { - case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: - return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; - case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: - return "SYNCHRONOUS_COMMIT_ON"; - case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: - return "SYNCHRONOUS_COMMIT_OFF"; - case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: - return "SYNCHRONOUS_COMMIT_LOCAL"; - case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: - return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; - case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: - return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + case Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; default: return "UNKNOWN"; } @@ -362,6 +368,54 @@ export function 
postgresqlconfig111c_ForceParallelModeToJSON( } } +export enum Postgresqlconfig111c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig111c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig111c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig111c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlconfig111c_LogErrorVerbosityToJSON( + object: Postgresqlconfig111c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlconfig111c_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -458,54 +512,6 @@ export function postgresqlconfig111c_LogLevelToJSON( } } -export enum Postgresqlconfig111c_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlconfig111c_LogErrorVerbosityFromJSON( - object: any -): Postgresqlconfig111c_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlconfig111c_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlconfig111c_LogErrorVerbosityToJSON( - object: Postgresqlconfig111c_LogErrorVerbosity -): string { - switch (object) { - case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case Postgresqlconfig111c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlconfig111c_LogStatement { 
LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -560,6 +566,186 @@ export function postgresqlconfig111c_LogStatementToJSON( } } +export enum Postgresqlconfig111c_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig111c_PgHintPlanDebugPrintFromJSON( + object: any +): Postgresqlconfig111c_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig111c_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlconfig111c_PgHintPlanDebugPrintToJSON( + object: Postgresqlconfig111c_PgHintPlanDebugPrint +): string { + switch (object) { + case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig111c_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig111c_SharedPreloadLibrariesFromJSON( + object: any +): Postgresqlconfig111c_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return 
Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig111c_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlconfig111c_SharedPreloadLibrariesToJSON( + object: Postgresqlconfig111c_SharedPreloadLibraries +): string { + switch (object) { + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig111c_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig111c_SynchronousCommitFromJSON( + object: any +): Postgresqlconfig111c_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig111c_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlconfig111c_SynchronousCommitToJSON( + object: Postgresqlconfig111c_SynchronousCommit +): string { + switch (object) { + case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case 
Postgresqlconfig111c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlconfig111c_TransactionIsolation { TRANSACTION_ISOLATION_UNSPECIFIED = 0, TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, @@ -611,46 +797,46 @@ export function postgresqlconfig111c_TransactionIsolationToJSON( return "TRANSACTION_ISOLATION_SERIALIZABLE"; default: return "UNKNOWN"; - } -} - -export enum Postgresqlconfig111c_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, + } +} + +export enum Postgresqlconfig111c_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, UNRECOGNIZED = -1, } -export function postgresqlconfig111c_ByteaOutputFromJSON( +export function postgresqlconfig111c_WalLevelFromJSON( object: any -): Postgresqlconfig111c_ByteaOutput { +): Postgresqlconfig111c_WalLevel { switch (object) { case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case "WAL_LEVEL_UNSPECIFIED": + return Postgresqlconfig111c_WalLevel.WAL_LEVEL_UNSPECIFIED; case 1: - case "BYTEA_OUTPUT_HEX": - return Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX; + case "WAL_LEVEL_REPLICA": + return Postgresqlconfig111c_WalLevel.WAL_LEVEL_REPLICA; case 2: - case "BYTEA_OUTPUT_ESCAPED": - return Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case "WAL_LEVEL_LOGICAL": + return Postgresqlconfig111c_WalLevel.WAL_LEVEL_LOGICAL; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig111c_ByteaOutput.UNRECOGNIZED; + return Postgresqlconfig111c_WalLevel.UNRECOGNIZED; } } -export function postgresqlconfig111c_ByteaOutputToJSON( - object: Postgresqlconfig111c_ByteaOutput +export function postgresqlconfig111c_WalLevelToJSON( + object: Postgresqlconfig111c_WalLevel ): string { switch (object) { - case Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case Postgresqlconfig111c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; + case Postgresqlconfig111c_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case Postgresqlconfig111c_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case Postgresqlconfig111c_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; default: return "UNKNOWN"; } @@ -740,168 +926,6 @@ export function postgresqlconfig111c_XmlOptionToJSON( } } -export enum Postgresqlconfig111c_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlconfig111c_BackslashQuoteFromJSON( - object: any -): Postgresqlconfig111c_BackslashQuote { - switch (object) { - case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; - case 1: - case "BACKSLASH_QUOTE": - return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE; - case 2: - case "BACKSLASH_QUOTE_ON": - return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON; - case 3: - case "BACKSLASH_QUOTE_OFF": - return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF; - case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; - case -1: - case "UNRECOGNIZED": - default: - return 
Postgresqlconfig111c_BackslashQuote.UNRECOGNIZED; - } -} - -export function postgresqlconfig111c_BackslashQuoteToJSON( - object: Postgresqlconfig111c_BackslashQuote -): string { - switch (object) { - case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case Postgresqlconfig111c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; - default: - return "UNKNOWN"; - } -} - -export enum Postgresqlconfig111c_PgHintPlanDebugPrint { - PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, - PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, - PG_HINT_PLAN_DEBUG_PRINT_ON = 2, - PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, - PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlconfig111c_PgHintPlanDebugPrintFromJSON( - object: any -): Postgresqlconfig111c_PgHintPlanDebugPrint { - switch (object) { - case 0: - case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": - return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; - case 1: - case "PG_HINT_PLAN_DEBUG_PRINT_OFF": - return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; - case 2: - case "PG_HINT_PLAN_DEBUG_PRINT_ON": - return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; - case 3: - case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": - return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; - case 4: - case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": - return Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlconfig111c_PgHintPlanDebugPrint.UNRECOGNIZED; - } -} - -export function postgresqlconfig111c_PgHintPlanDebugPrintToJSON( - object: Postgresqlconfig111c_PgHintPlanDebugPrint -): string { - switch (object) { - case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: - return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; - case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: - return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; - case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: - return "PG_HINT_PLAN_DEBUG_PRINT_ON"; - case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: - return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; - case Postgresqlconfig111c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: - return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; - default: - return "UNKNOWN"; - } -} - -export enum Postgresqlconfig111c_SharedPreloadLibraries { - SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, - SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, - SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, - SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, - SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, - UNRECOGNIZED = -1, -} - -export function postgresqlconfig111c_SharedPreloadLibrariesFromJSON( - object: any -): Postgresqlconfig111c_SharedPreloadLibraries { - switch (object) { - case 0: - case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": - return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; - case 1: - case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": - return 
Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; - case 2: - case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": - return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; - case 3: - case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": - return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; - case 4: - case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": - return Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlconfig111c_SharedPreloadLibraries.UNRECOGNIZED; - } -} - -export function postgresqlconfig111c_SharedPreloadLibrariesToJSON( - object: Postgresqlconfig111c_SharedPreloadLibraries -): string { - switch (object) { - case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: - return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; - case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: - return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; - case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: - return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; - case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: - return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; - case Postgresqlconfig111c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: - return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; - default: - return "UNKNOWN"; - } -} - export interface Postgresqlconfigset111c { $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet11_1C"; /** @@ -1488,6 +1512,15 @@ export const Postgresqlconfig111c = { writer.uint32(618).fork() ).ldelim(); } + if (message.onlineAnalyzeEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.onlineAnalyzeEnable!, + }, + writer.uint32(634).fork() + ).ldelim(); + } if (message.enableBitmapscan !== undefined) { BoolValue.encode( { @@ -1856,6 +1889,45 @@ export const Postgresqlconfig111c = { writer.uint32(1010).fork() ).ldelim(); } + if (message.plantunerFixEmptyTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.plantunerFixEmptyTable!, + }, + writer.uint32(1194).fork() + ).ldelim(); + } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! 
}, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2266,6 +2338,12 @@ export const Postgresqlconfig111c = { reader.uint32() ).value; break; + case 79: + message.onlineAnalyzeEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; case 80: message.enableBitmapscan = BoolValue.decode( reader, @@ -2537,6 +2615,33 @@ export const Postgresqlconfig111c = { reader.uint32() ).value; break; + case 149: + message.plantunerFixEmptyTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -2909,6 +3014,11 @@ export const Postgresqlconfig111c = { object.trackActivityQuerySize !== null ? Number(object.trackActivityQuerySize) : undefined; + message.onlineAnalyzeEnable = + object.onlineAnalyzeEnable !== undefined && + object.onlineAnalyzeEnable !== null + ? Boolean(object.onlineAnalyzeEnable) + : undefined; message.enableBitmapscan = object.enableBitmapscan !== undefined && object.enableBitmapscan !== null ? Boolean(object.enableBitmapscan) @@ -3131,6 +3241,31 @@ export const Postgresqlconfig111c = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.plantunerFixEmptyTable = + object.plantunerFixEmptyTable !== undefined && + object.plantunerFixEmptyTable !== null + ? Boolean(object.plantunerFixEmptyTable) + : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? 
Number(object.geqoSeed) + : undefined; return message; }, @@ -3309,6 +3444,8 @@ export const Postgresqlconfig111c = { (obj.archiveTimeout = message.archiveTimeout); message.trackActivityQuerySize !== undefined && (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.onlineAnalyzeEnable !== undefined && + (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); message.enableBitmapscan !== undefined && (obj.enableBitmapscan = message.enableBitmapscan); message.enableHashagg !== undefined && @@ -3413,6 +3550,15 @@ export const Postgresqlconfig111c = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.plantunerFixEmptyTable !== undefined && + (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3509,6 +3655,7 @@ export const Postgresqlconfig111c = { message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; message.archiveTimeout = object.archiveTimeout ?? undefined; message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; message.enableBitmapscan = object.enableBitmapscan ?? undefined; message.enableHashagg = object.enableHashagg ?? undefined; message.enableHashjoin = object.enableHashjoin ?? undefined; @@ -3570,6 +3717,12 @@ export const Postgresqlconfig111c = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts index 85503a0c..599d27e9 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts @@ -164,105 +164,109 @@ export interface PostgresqlConfig12 { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + /** in bytes. 
*/ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } -export enum PostgresqlConfig12_WalLevel { - WAL_LEVEL_UNSPECIFIED = 0, - WAL_LEVEL_REPLICA = 1, - WAL_LEVEL_LOGICAL = 2, +export enum PostgresqlConfig12_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig12_WalLevelFromJSON( +export function postgresqlConfig12_BackslashQuoteFromJSON( object: any -): PostgresqlConfig12_WalLevel { +): PostgresqlConfig12_BackslashQuote { switch (object) { case 0: - case "WAL_LEVEL_UNSPECIFIED": - return PostgresqlConfig12_WalLevel.WAL_LEVEL_UNSPECIFIED; + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; case 1: - case "WAL_LEVEL_REPLICA": - return PostgresqlConfig12_WalLevel.WAL_LEVEL_REPLICA; + case "BACKSLASH_QUOTE": + return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE; case 2: - case "WAL_LEVEL_LOGICAL": - return PostgresqlConfig12_WalLevel.WAL_LEVEL_LOGICAL; + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_WalLevel.UNRECOGNIZED; + return PostgresqlConfig12_BackslashQuote.UNRECOGNIZED; } } -export function postgresqlConfig12_WalLevelToJSON( - object: PostgresqlConfig12_WalLevel +export function postgresqlConfig12_BackslashQuoteToJSON( + object: PostgresqlConfig12_BackslashQuote ): string { switch (object) { - case PostgresqlConfig12_WalLevel.WAL_LEVEL_UNSPECIFIED: - return "WAL_LEVEL_UNSPECIFIED"; - case PostgresqlConfig12_WalLevel.WAL_LEVEL_REPLICA: - return "WAL_LEVEL_REPLICA"; - case PostgresqlConfig12_WalLevel.WAL_LEVEL_LOGICAL: - return "WAL_LEVEL_LOGICAL"; + case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_SynchronousCommit { - SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, - SYNCHRONOUS_COMMIT_ON = 1, - SYNCHRONOUS_COMMIT_OFF = 2, - SYNCHRONOUS_COMMIT_LOCAL = 3, - SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, - SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, +export enum PostgresqlConfig12_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig12_SynchronousCommitFromJSON( +export function postgresqlConfig12_ByteaOutputFromJSON( object: any -): PostgresqlConfig12_SynchronousCommit { +): PostgresqlConfig12_ByteaOutput { switch (object) { case 0: - case 
"SYNCHRONOUS_COMMIT_UNSPECIFIED": - return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; case 1: - case "SYNCHRONOUS_COMMIT_ON": - return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_HEX; case 2: - case "SYNCHRONOUS_COMMIT_OFF": - return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; - case 3: - case "SYNCHRONOUS_COMMIT_LOCAL": - return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; - case 4: - case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": - return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; - case 5: - case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": - return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_SynchronousCommit.UNRECOGNIZED; + return PostgresqlConfig12_ByteaOutput.UNRECOGNIZED; } } -export function postgresqlConfig12_SynchronousCommitToJSON( - object: PostgresqlConfig12_SynchronousCommit +export function postgresqlConfig12_ByteaOutputToJSON( + object: PostgresqlConfig12_ByteaOutput ): string { switch (object) { - case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: - return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; - case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: - return "SYNCHRONOUS_COMMIT_ON"; - case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: - return "SYNCHRONOUS_COMMIT_OFF"; - case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: - return "SYNCHRONOUS_COMMIT_LOCAL"; - case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: - return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; - case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: - return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + case PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; default: return "UNKNOWN"; } @@ -364,6 +368,54 @@ export function postgresqlConfig12_ForceParallelModeToJSON( } } +export enum PostgresqlConfig12_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig12_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig12_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig12_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig12_LogErrorVerbosityToJSON( + object: PostgresqlConfig12_LogErrorVerbosity +): string { + switch (object) 
{ + case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlConfig12_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -460,54 +512,6 @@ export function postgresqlConfig12_LogLevelToJSON( } } -export enum PostgresqlConfig12_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig12_LogErrorVerbosityFromJSON( - object: any -): PostgresqlConfig12_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig12_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlConfig12_LogErrorVerbosityToJSON( - object: PostgresqlConfig12_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlConfig12_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlConfig12_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -562,391 +566,409 @@ export function postgresqlConfig12_LogStatementToJSON( } } -export enum PostgresqlConfig12_TransactionIsolation { - TRANSACTION_ISOLATION_UNSPECIFIED = 0, - TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, - TRANSACTION_ISOLATION_READ_COMMITTED = 2, - TRANSACTION_ISOLATION_REPEATABLE_READ = 3, - TRANSACTION_ISOLATION_SERIALIZABLE = 4, +export enum PostgresqlConfig12_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig12_TransactionIsolationFromJSON( +export function postgresqlConfig12_PgHintPlanDebugPrintFromJSON( object: any -): PostgresqlConfig12_TransactionIsolation { +): PostgresqlConfig12_PgHintPlanDebugPrint { switch (object) { case 0: - case "TRANSACTION_ISOLATION_UNSPECIFIED": - return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; case 1: - case 
"TRANSACTION_ISOLATION_READ_UNCOMMITTED": - return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; case 2: - case "TRANSACTION_ISOLATION_READ_COMMITTED": - return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; case 3: - case "TRANSACTION_ISOLATION_REPEATABLE_READ": - return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; case 4: - case "TRANSACTION_ISOLATION_SERIALIZABLE": - return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_TransactionIsolation.UNRECOGNIZED; + return PostgresqlConfig12_PgHintPlanDebugPrint.UNRECOGNIZED; } } -export function postgresqlConfig12_TransactionIsolationToJSON( - object: PostgresqlConfig12_TransactionIsolation +export function postgresqlConfig12_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig12_PgHintPlanDebugPrint ): string { switch (object) { - case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: - return "TRANSACTION_ISOLATION_UNSPECIFIED"; - case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: - return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; - case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: - return "TRANSACTION_ISOLATION_READ_COMMITTED"; - case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: - return "TRANSACTION_ISOLATION_REPEATABLE_READ"; - case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: - return "TRANSACTION_ISOLATION_SERIALIZABLE"; + case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, +export enum PostgresqlConfig12_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, UNRECOGNIZED = -1, } -export function postgresqlConfig12_ByteaOutputFromJSON( +export function postgresqlConfig12_PlanCacheModeFromJSON( object: any -): PostgresqlConfig12_ByteaOutput { +): PostgresqlConfig12_PlanCacheMode { switch (object) { case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case "PLAN_CACHE_MODE_UNSPECIFIED": + return 
PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_HEX; + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_AUTO; case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_ByteaOutput.UNRECOGNIZED; + return PostgresqlConfig12_PlanCacheMode.UNRECOGNIZED; } } -export function postgresqlConfig12_ByteaOutputToJSON( - object: PostgresqlConfig12_ByteaOutput +export function postgresqlConfig12_PlanCacheModeToJSON( + object: PostgresqlConfig12_PlanCacheMode ): string { switch (object) { - case PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlConfig12_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; + case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_XmlBinary { - XML_BINARY_UNSPECIFIED = 0, - XML_BINARY_BASE64 = 1, - XML_BINARY_HEX = 2, +export enum PostgresqlConfig12_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, UNRECOGNIZED = -1, } -export function postgresqlConfig12_XmlBinaryFromJSON( +export function postgresqlConfig12_SharedPreloadLibrariesFromJSON( object: any -): PostgresqlConfig12_XmlBinary { +): PostgresqlConfig12_SharedPreloadLibraries { switch (object) { case 0: - case "XML_BINARY_UNSPECIFIED": - return PostgresqlConfig12_XmlBinary.XML_BINARY_UNSPECIFIED; + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; case 1: - case "XML_BINARY_BASE64": - return PostgresqlConfig12_XmlBinary.XML_BINARY_BASE64; + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; case 2: - case "XML_BINARY_HEX": - return PostgresqlConfig12_XmlBinary.XML_BINARY_HEX; + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return 
PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_XmlBinary.UNRECOGNIZED; + return PostgresqlConfig12_SharedPreloadLibraries.UNRECOGNIZED; } } -export function postgresqlConfig12_XmlBinaryToJSON( - object: PostgresqlConfig12_XmlBinary +export function postgresqlConfig12_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig12_SharedPreloadLibraries ): string { switch (object) { - case PostgresqlConfig12_XmlBinary.XML_BINARY_UNSPECIFIED: - return "XML_BINARY_UNSPECIFIED"; - case PostgresqlConfig12_XmlBinary.XML_BINARY_BASE64: - return "XML_BINARY_BASE64"; - case PostgresqlConfig12_XmlBinary.XML_BINARY_HEX: - return "XML_BINARY_HEX"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_XmlOption { - XML_OPTION_UNSPECIFIED = 0, - XML_OPTION_DOCUMENT = 1, - XML_OPTION_CONTENT = 2, +export enum PostgresqlConfig12_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, UNRECOGNIZED = -1, } -export function postgresqlConfig12_XmlOptionFromJSON( +export function postgresqlConfig12_SynchronousCommitFromJSON( object: any -): PostgresqlConfig12_XmlOption { +): PostgresqlConfig12_SynchronousCommit { switch (object) { case 0: - case "XML_OPTION_UNSPECIFIED": - return PostgresqlConfig12_XmlOption.XML_OPTION_UNSPECIFIED; + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; case 1: - case "XML_OPTION_DOCUMENT": - return PostgresqlConfig12_XmlOption.XML_OPTION_DOCUMENT; + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; case 2: - case "XML_OPTION_CONTENT": - return PostgresqlConfig12_XmlOption.XML_OPTION_CONTENT; + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; case -1: case "UNRECOGNIZED": default: - return 
PostgresqlConfig12_XmlOption.UNRECOGNIZED; + return PostgresqlConfig12_SynchronousCommit.UNRECOGNIZED; } } -export function postgresqlConfig12_XmlOptionToJSON( - object: PostgresqlConfig12_XmlOption +export function postgresqlConfig12_SynchronousCommitToJSON( + object: PostgresqlConfig12_SynchronousCommit ): string { switch (object) { - case PostgresqlConfig12_XmlOption.XML_OPTION_UNSPECIFIED: - return "XML_OPTION_UNSPECIFIED"; - case PostgresqlConfig12_XmlOption.XML_OPTION_DOCUMENT: - return "XML_OPTION_DOCUMENT"; - case PostgresqlConfig12_XmlOption.XML_OPTION_CONTENT: - return "XML_OPTION_CONTENT"; + case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig12_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, +export enum PostgresqlConfig12_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig12_BackslashQuoteFromJSON( +export function postgresqlConfig12_TransactionIsolationFromJSON( object: any -): PostgresqlConfig12_BackslashQuote { +): PostgresqlConfig12_TransactionIsolation { switch (object) { case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; case 1: - case "BACKSLASH_QUOTE": - return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE; + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_ON; + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF; + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_BackslashQuote.UNRECOGNIZED; + return PostgresqlConfig12_TransactionIsolation.UNRECOGNIZED; } } -export function postgresqlConfig12_BackslashQuoteToJSON( - object: PostgresqlConfig12_BackslashQuote +export function 
postgresqlConfig12_TransactionIsolationToJSON( + object: PostgresqlConfig12_TransactionIsolation ): string { switch (object) { - case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlConfig12_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; + case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig12_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_PlanCacheMode { - PLAN_CACHE_MODE_UNSPECIFIED = 0, - PLAN_CACHE_MODE_AUTO = 1, - PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, - PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, +export enum PostgresqlConfig12_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig12_PlanCacheModeFromJSON( +export function postgresqlConfig12_WalLevelFromJSON( object: any -): PostgresqlConfig12_PlanCacheMode { +): PostgresqlConfig12_WalLevel { switch (object) { case 0: - case "PLAN_CACHE_MODE_UNSPECIFIED": - return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig12_WalLevel.WAL_LEVEL_UNSPECIFIED; case 1: - case "PLAN_CACHE_MODE_AUTO": - return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig12_WalLevel.WAL_LEVEL_REPLICA; case 2: - case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": - return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; - case 3: - case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": - return PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig12_WalLevel.WAL_LEVEL_LOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_PlanCacheMode.UNRECOGNIZED; + return PostgresqlConfig12_WalLevel.UNRECOGNIZED; } } -export function postgresqlConfig12_PlanCacheModeToJSON( - object: PostgresqlConfig12_PlanCacheMode +export function postgresqlConfig12_WalLevelToJSON( + object: PostgresqlConfig12_WalLevel ): string { switch (object) { - case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: - return "PLAN_CACHE_MODE_UNSPECIFIED"; - case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_AUTO: - return "PLAN_CACHE_MODE_AUTO"; - case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: - return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; - case PostgresqlConfig12_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: - return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + case PostgresqlConfig12_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case 
PostgresqlConfig12_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig12_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_PgHintPlanDebugPrint { - PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, - PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, - PG_HINT_PLAN_DEBUG_PRINT_ON = 2, - PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, - PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, +export enum PostgresqlConfig12_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig12_PgHintPlanDebugPrintFromJSON( +export function postgresqlConfig12_XmlBinaryFromJSON( object: any -): PostgresqlConfig12_PgHintPlanDebugPrint { +): PostgresqlConfig12_XmlBinary { switch (object) { case 0: - case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": - return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig12_XmlBinary.XML_BINARY_UNSPECIFIED; case 1: - case "PG_HINT_PLAN_DEBUG_PRINT_OFF": - return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case "XML_BINARY_BASE64": + return PostgresqlConfig12_XmlBinary.XML_BINARY_BASE64; case 2: - case "PG_HINT_PLAN_DEBUG_PRINT_ON": - return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; - case 3: - case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": - return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; - case 4: - case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": - return PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case "XML_BINARY_HEX": + return PostgresqlConfig12_XmlBinary.XML_BINARY_HEX; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_PgHintPlanDebugPrint.UNRECOGNIZED; + return PostgresqlConfig12_XmlBinary.UNRECOGNIZED; } } -export function postgresqlConfig12_PgHintPlanDebugPrintToJSON( - object: PostgresqlConfig12_PgHintPlanDebugPrint +export function postgresqlConfig12_XmlBinaryToJSON( + object: PostgresqlConfig12_XmlBinary ): string { switch (object) { - case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: - return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; - case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: - return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; - case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: - return "PG_HINT_PLAN_DEBUG_PRINT_ON"; - case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: - return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; - case PostgresqlConfig12_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: - return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + case PostgresqlConfig12_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlConfig12_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig12_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig12_SharedPreloadLibraries { - SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, - SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, - SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, - SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, - SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, +export enum PostgresqlConfig12_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig12_SharedPreloadLibrariesFromJSON( +export 
function postgresqlConfig12_XmlOptionFromJSON( object: any -): PostgresqlConfig12_SharedPreloadLibraries { +): PostgresqlConfig12_XmlOption { switch (object) { case 0: - case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": - return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig12_XmlOption.XML_OPTION_UNSPECIFIED; case 1: - case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": - return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig12_XmlOption.XML_OPTION_DOCUMENT; case 2: - case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": - return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; - case 3: - case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": - return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; - case 4: - case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": - return PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case "XML_OPTION_CONTENT": + return PostgresqlConfig12_XmlOption.XML_OPTION_CONTENT; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig12_SharedPreloadLibraries.UNRECOGNIZED; + return PostgresqlConfig12_XmlOption.UNRECOGNIZED; } } -export function postgresqlConfig12_SharedPreloadLibrariesToJSON( - object: PostgresqlConfig12_SharedPreloadLibraries +export function postgresqlConfig12_XmlOptionToJSON( + object: PostgresqlConfig12_XmlOption ): string { switch (object) { - case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: - return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; - case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: - return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; - case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: - return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; - case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: - return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; - case PostgresqlConfig12_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: - return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig12_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig12_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig12_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; default: return "UNKNOWN"; } @@ -1919,6 +1941,36 @@ export const PostgresqlConfig12 = { writer.uint32(1026).fork() ).ldelim(); } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! 
}, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2606,6 +2658,27 @@ export const PostgresqlConfig12 = { reader.uint32() ).value; break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -3203,6 +3276,26 @@ export const PostgresqlConfig12 = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3490,6 +3583,13 @@ export const PostgresqlConfig12 = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3650,6 +3750,11 @@ export const PostgresqlConfig12 = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts index d79a33ea..59eb96fb 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts @@ -114,6 +114,7 @@ export interface Postgresqlconfig121c { /** in milliseconds. */ archiveTimeout?: number; trackActivityQuerySize?: number; + onlineAnalyzeEnable?: boolean; enableBitmapscan?: boolean; enableHashagg?: boolean; enableHashjoin?: boolean; @@ -164,105 +165,110 @@ export interface Postgresqlconfig121c { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + plantunerFixEmptyTable?: boolean; + /** in bytes. 
*/ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } -export enum Postgresqlconfig121c_WalLevel { - WAL_LEVEL_UNSPECIFIED = 0, - WAL_LEVEL_REPLICA = 1, - WAL_LEVEL_LOGICAL = 2, +export enum Postgresqlconfig121c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_WalLevelFromJSON( +export function postgresqlconfig121c_BackslashQuoteFromJSON( object: any -): Postgresqlconfig121c_WalLevel { +): Postgresqlconfig121c_BackslashQuote { switch (object) { case 0: - case "WAL_LEVEL_UNSPECIFIED": - return Postgresqlconfig121c_WalLevel.WAL_LEVEL_UNSPECIFIED; + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; case 1: - case "WAL_LEVEL_REPLICA": - return Postgresqlconfig121c_WalLevel.WAL_LEVEL_REPLICA; + case "BACKSLASH_QUOTE": + return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE; case 2: - case "WAL_LEVEL_LOGICAL": - return Postgresqlconfig121c_WalLevel.WAL_LEVEL_LOGICAL; + case "BACKSLASH_QUOTE_ON": + return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_WalLevel.UNRECOGNIZED; + return Postgresqlconfig121c_BackslashQuote.UNRECOGNIZED; } } -export function postgresqlconfig121c_WalLevelToJSON( - object: Postgresqlconfig121c_WalLevel +export function postgresqlconfig121c_BackslashQuoteToJSON( + object: Postgresqlconfig121c_BackslashQuote ): string { switch (object) { - case Postgresqlconfig121c_WalLevel.WAL_LEVEL_UNSPECIFIED: - return "WAL_LEVEL_UNSPECIFIED"; - case Postgresqlconfig121c_WalLevel.WAL_LEVEL_REPLICA: - return "WAL_LEVEL_REPLICA"; - case Postgresqlconfig121c_WalLevel.WAL_LEVEL_LOGICAL: - return "WAL_LEVEL_LOGICAL"; + case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_SynchronousCommit { - SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, - SYNCHRONOUS_COMMIT_ON = 1, - SYNCHRONOUS_COMMIT_OFF = 2, - SYNCHRONOUS_COMMIT_LOCAL = 3, - SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, - SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, +export enum Postgresqlconfig121c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_SynchronousCommitFromJSON( +export function postgresqlconfig121c_ByteaOutputFromJSON( object: any -): Postgresqlconfig121c_SynchronousCommit { +): 
Postgresqlconfig121c_ByteaOutput { switch (object) { case 0: - case "SYNCHRONOUS_COMMIT_UNSPECIFIED": - return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; case 1: - case "SYNCHRONOUS_COMMIT_ON": - return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case "BYTEA_OUTPUT_HEX": + return Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX; case 2: - case "SYNCHRONOUS_COMMIT_OFF": - return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; - case 3: - case "SYNCHRONOUS_COMMIT_LOCAL": - return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; - case 4: - case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": - return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; - case 5: - case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": - return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_SynchronousCommit.UNRECOGNIZED; + return Postgresqlconfig121c_ByteaOutput.UNRECOGNIZED; } } -export function postgresqlconfig121c_SynchronousCommitToJSON( - object: Postgresqlconfig121c_SynchronousCommit +export function postgresqlconfig121c_ByteaOutputToJSON( + object: Postgresqlconfig121c_ByteaOutput ): string { switch (object) { - case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: - return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; - case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: - return "SYNCHRONOUS_COMMIT_ON"; - case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: - return "SYNCHRONOUS_COMMIT_OFF"; - case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: - return "SYNCHRONOUS_COMMIT_LOCAL"; - case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: - return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; - case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: - return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + case Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; default: return "UNKNOWN"; } @@ -364,6 +370,54 @@ export function postgresqlconfig121c_ForceParallelModeToJSON( } } +export enum Postgresqlconfig121c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig121c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig121c_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig121c_LogErrorVerbosity.UNRECOGNIZED; + } +} + 
+export function postgresqlconfig121c_LogErrorVerbosityToJSON( + object: Postgresqlconfig121c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum Postgresqlconfig121c_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -460,54 +514,6 @@ export function postgresqlconfig121c_LogLevelToJSON( } } -export enum Postgresqlconfig121c_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlconfig121c_LogErrorVerbosityFromJSON( - object: any -): Postgresqlconfig121c_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return Postgresqlconfig121c_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlconfig121c_LogErrorVerbosityToJSON( - object: Postgresqlconfig121c_LogErrorVerbosity -): string { - switch (object) { - case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case Postgresqlconfig121c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum Postgresqlconfig121c_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -562,391 +568,409 @@ export function postgresqlconfig121c_LogStatementToJSON( } } -export enum Postgresqlconfig121c_TransactionIsolation { - TRANSACTION_ISOLATION_UNSPECIFIED = 0, - TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, - TRANSACTION_ISOLATION_READ_COMMITTED = 2, - TRANSACTION_ISOLATION_REPEATABLE_READ = 3, - TRANSACTION_ISOLATION_SERIALIZABLE = 4, +export enum Postgresqlconfig121c_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_TransactionIsolationFromJSON( +export function postgresqlconfig121c_PgHintPlanDebugPrintFromJSON( object: any -): Postgresqlconfig121c_TransactionIsolation { +): Postgresqlconfig121c_PgHintPlanDebugPrint { switch (object) { case 0: - case "TRANSACTION_ISOLATION_UNSPECIFIED": - return 
Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; case 1: - case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": - return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; case 2: - case "TRANSACTION_ISOLATION_READ_COMMITTED": - return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; case 3: - case "TRANSACTION_ISOLATION_REPEATABLE_READ": - return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; case 4: - case "TRANSACTION_ISOLATION_SERIALIZABLE": - return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_TransactionIsolation.UNRECOGNIZED; + return Postgresqlconfig121c_PgHintPlanDebugPrint.UNRECOGNIZED; } } -export function postgresqlconfig121c_TransactionIsolationToJSON( - object: Postgresqlconfig121c_TransactionIsolation +export function postgresqlconfig121c_PgHintPlanDebugPrintToJSON( + object: Postgresqlconfig121c_PgHintPlanDebugPrint ): string { switch (object) { - case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: - return "TRANSACTION_ISOLATION_UNSPECIFIED"; - case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: - return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; - case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: - return "TRANSACTION_ISOLATION_READ_COMMITTED"; - case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: - return "TRANSACTION_ISOLATION_REPEATABLE_READ"; - case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: - return "TRANSACTION_ISOLATION_SERIALIZABLE"; + case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, +export enum Postgresqlconfig121c_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_ByteaOutputFromJSON( +export function postgresqlconfig121c_PlanCacheModeFromJSON( object: any -): 
Postgresqlconfig121c_ByteaOutput { +): Postgresqlconfig121c_PlanCacheMode { switch (object) { case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case "PLAN_CACHE_MODE_UNSPECIFIED": + return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; case 1: - case "BYTEA_OUTPUT_HEX": - return Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX; + case "PLAN_CACHE_MODE_AUTO": + return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_AUTO; case 2: - case "BYTEA_OUTPUT_ESCAPED": - return Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_ByteaOutput.UNRECOGNIZED; + return Postgresqlconfig121c_PlanCacheMode.UNRECOGNIZED; } } -export function postgresqlconfig121c_ByteaOutputToJSON( - object: Postgresqlconfig121c_ByteaOutput +export function postgresqlconfig121c_PlanCacheModeToJSON( + object: Postgresqlconfig121c_PlanCacheMode ): string { switch (object) { - case Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case Postgresqlconfig121c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; + case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_XmlBinary { - XML_BINARY_UNSPECIFIED = 0, - XML_BINARY_BASE64 = 1, - XML_BINARY_HEX = 2, +export enum Postgresqlconfig121c_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_XmlBinaryFromJSON( +export function postgresqlconfig121c_SharedPreloadLibrariesFromJSON( object: any -): Postgresqlconfig121c_XmlBinary { +): Postgresqlconfig121c_SharedPreloadLibraries { switch (object) { case 0: - case "XML_BINARY_UNSPECIFIED": - return Postgresqlconfig121c_XmlBinary.XML_BINARY_UNSPECIFIED; + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; case 1: - case "XML_BINARY_BASE64": - return Postgresqlconfig121c_XmlBinary.XML_BINARY_BASE64; + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; case 2: - case "XML_BINARY_HEX": - return Postgresqlconfig121c_XmlBinary.XML_BINARY_HEX; + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + 
return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_XmlBinary.UNRECOGNIZED; + return Postgresqlconfig121c_SharedPreloadLibraries.UNRECOGNIZED; } } -export function postgresqlconfig121c_XmlBinaryToJSON( - object: Postgresqlconfig121c_XmlBinary +export function postgresqlconfig121c_SharedPreloadLibrariesToJSON( + object: Postgresqlconfig121c_SharedPreloadLibraries ): string { switch (object) { - case Postgresqlconfig121c_XmlBinary.XML_BINARY_UNSPECIFIED: - return "XML_BINARY_UNSPECIFIED"; - case Postgresqlconfig121c_XmlBinary.XML_BINARY_BASE64: - return "XML_BINARY_BASE64"; - case Postgresqlconfig121c_XmlBinary.XML_BINARY_HEX: - return "XML_BINARY_HEX"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_XmlOption { - XML_OPTION_UNSPECIFIED = 0, - XML_OPTION_DOCUMENT = 1, - XML_OPTION_CONTENT = 2, +export enum Postgresqlconfig121c_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_XmlOptionFromJSON( +export function postgresqlconfig121c_SynchronousCommitFromJSON( object: any -): Postgresqlconfig121c_XmlOption { +): Postgresqlconfig121c_SynchronousCommit { switch (object) { case 0: - case "XML_OPTION_UNSPECIFIED": - return Postgresqlconfig121c_XmlOption.XML_OPTION_UNSPECIFIED; + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; case 1: - case "XML_OPTION_DOCUMENT": - return Postgresqlconfig121c_XmlOption.XML_OPTION_DOCUMENT; + case "SYNCHRONOUS_COMMIT_ON": + return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; case 2: - case "XML_OPTION_CONTENT": - return Postgresqlconfig121c_XmlOption.XML_OPTION_CONTENT; + case "SYNCHRONOUS_COMMIT_OFF": + return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return 
Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_XmlOption.UNRECOGNIZED; + return Postgresqlconfig121c_SynchronousCommit.UNRECOGNIZED; } } -export function postgresqlconfig121c_XmlOptionToJSON( - object: Postgresqlconfig121c_XmlOption +export function postgresqlconfig121c_SynchronousCommitToJSON( + object: Postgresqlconfig121c_SynchronousCommit ): string { switch (object) { - case Postgresqlconfig121c_XmlOption.XML_OPTION_UNSPECIFIED: - return "XML_OPTION_UNSPECIFIED"; - case Postgresqlconfig121c_XmlOption.XML_OPTION_DOCUMENT: - return "XML_OPTION_DOCUMENT"; - case Postgresqlconfig121c_XmlOption.XML_OPTION_CONTENT: - return "XML_OPTION_CONTENT"; + case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case Postgresqlconfig121c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, +export enum Postgresqlconfig121c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_BackslashQuoteFromJSON( +export function postgresqlconfig121c_TransactionIsolationFromJSON( object: any -): Postgresqlconfig121c_BackslashQuote { +): Postgresqlconfig121c_TransactionIsolation { switch (object) { case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; case 1: - case "BACKSLASH_QUOTE": - return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE; + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; case 2: - case "BACKSLASH_QUOTE_ON": - return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON; + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; case 3: - case "BACKSLASH_QUOTE_OFF": - return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; 
+ case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_BackslashQuote.UNRECOGNIZED; + return Postgresqlconfig121c_TransactionIsolation.UNRECOGNIZED; } } -export function postgresqlconfig121c_BackslashQuoteToJSON( - object: Postgresqlconfig121c_BackslashQuote +export function postgresqlconfig121c_TransactionIsolationToJSON( + object: Postgresqlconfig121c_TransactionIsolation ): string { switch (object) { - case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case Postgresqlconfig121c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; + case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlconfig121c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_PlanCacheMode { - PLAN_CACHE_MODE_UNSPECIFIED = 0, - PLAN_CACHE_MODE_AUTO = 1, - PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, - PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, +export enum Postgresqlconfig121c_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_PlanCacheModeFromJSON( +export function postgresqlconfig121c_WalLevelFromJSON( object: any -): Postgresqlconfig121c_PlanCacheMode { +): Postgresqlconfig121c_WalLevel { switch (object) { case 0: - case "PLAN_CACHE_MODE_UNSPECIFIED": - return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case "WAL_LEVEL_UNSPECIFIED": + return Postgresqlconfig121c_WalLevel.WAL_LEVEL_UNSPECIFIED; case 1: - case "PLAN_CACHE_MODE_AUTO": - return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case "WAL_LEVEL_REPLICA": + return Postgresqlconfig121c_WalLevel.WAL_LEVEL_REPLICA; case 2: - case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": - return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; - case 3: - case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": - return Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case "WAL_LEVEL_LOGICAL": + return Postgresqlconfig121c_WalLevel.WAL_LEVEL_LOGICAL; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_PlanCacheMode.UNRECOGNIZED; + return Postgresqlconfig121c_WalLevel.UNRECOGNIZED; } } -export function postgresqlconfig121c_PlanCacheModeToJSON( - object: Postgresqlconfig121c_PlanCacheMode +export function postgresqlconfig121c_WalLevelToJSON( + object: Postgresqlconfig121c_WalLevel ): string { switch (object) { - case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: - return 
"PLAN_CACHE_MODE_UNSPECIFIED"; - case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_AUTO: - return "PLAN_CACHE_MODE_AUTO"; - case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: - return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; - case Postgresqlconfig121c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: - return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + case Postgresqlconfig121c_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case Postgresqlconfig121c_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case Postgresqlconfig121c_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_PgHintPlanDebugPrint { - PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, - PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, - PG_HINT_PLAN_DEBUG_PRINT_ON = 2, - PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, - PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, +export enum Postgresqlconfig121c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_PgHintPlanDebugPrintFromJSON( +export function postgresqlconfig121c_XmlBinaryFromJSON( object: any -): Postgresqlconfig121c_PgHintPlanDebugPrint { +): Postgresqlconfig121c_XmlBinary { switch (object) { case 0: - case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": - return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case "XML_BINARY_UNSPECIFIED": + return Postgresqlconfig121c_XmlBinary.XML_BINARY_UNSPECIFIED; case 1: - case "PG_HINT_PLAN_DEBUG_PRINT_OFF": - return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case "XML_BINARY_BASE64": + return Postgresqlconfig121c_XmlBinary.XML_BINARY_BASE64; case 2: - case "PG_HINT_PLAN_DEBUG_PRINT_ON": - return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; - case 3: - case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": - return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; - case 4: - case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": - return Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case "XML_BINARY_HEX": + return Postgresqlconfig121c_XmlBinary.XML_BINARY_HEX; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_PgHintPlanDebugPrint.UNRECOGNIZED; + return Postgresqlconfig121c_XmlBinary.UNRECOGNIZED; } } -export function postgresqlconfig121c_PgHintPlanDebugPrintToJSON( - object: Postgresqlconfig121c_PgHintPlanDebugPrint +export function postgresqlconfig121c_XmlBinaryToJSON( + object: Postgresqlconfig121c_XmlBinary ): string { switch (object) { - case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: - return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; - case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: - return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; - case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: - return "PG_HINT_PLAN_DEBUG_PRINT_ON"; - case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: - return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; - case Postgresqlconfig121c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: - return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + case Postgresqlconfig121c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlconfig121c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlconfig121c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; 
default: return "UNKNOWN"; } } -export enum Postgresqlconfig121c_SharedPreloadLibraries { - SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, - SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, - SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, - SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, - SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, +export enum Postgresqlconfig121c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, UNRECOGNIZED = -1, } -export function postgresqlconfig121c_SharedPreloadLibrariesFromJSON( +export function postgresqlconfig121c_XmlOptionFromJSON( object: any -): Postgresqlconfig121c_SharedPreloadLibraries { +): Postgresqlconfig121c_XmlOption { switch (object) { case 0: - case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": - return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case "XML_OPTION_UNSPECIFIED": + return Postgresqlconfig121c_XmlOption.XML_OPTION_UNSPECIFIED; case 1: - case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": - return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case "XML_OPTION_DOCUMENT": + return Postgresqlconfig121c_XmlOption.XML_OPTION_DOCUMENT; case 2: - case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": - return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; - case 3: - case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": - return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; - case 4: - case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": - return Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case "XML_OPTION_CONTENT": + return Postgresqlconfig121c_XmlOption.XML_OPTION_CONTENT; case -1: case "UNRECOGNIZED": default: - return Postgresqlconfig121c_SharedPreloadLibraries.UNRECOGNIZED; + return Postgresqlconfig121c_XmlOption.UNRECOGNIZED; } } -export function postgresqlconfig121c_SharedPreloadLibrariesToJSON( - object: Postgresqlconfig121c_SharedPreloadLibraries +export function postgresqlconfig121c_XmlOptionToJSON( + object: Postgresqlconfig121c_XmlOption ): string { switch (object) { - case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: - return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; - case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: - return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; - case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: - return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; - case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: - return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; - case Postgresqlconfig121c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: - return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig121c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlconfig121c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlconfig121c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; default: return "UNKNOWN"; } @@ -1539,6 +1563,15 @@ export const Postgresqlconfig121c = { writer.uint32(618).fork() ).ldelim(); } + if (message.onlineAnalyzeEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.onlineAnalyzeEnable!, + }, + writer.uint32(634).fork() + ).ldelim(); + } if (message.enableBitmapscan !== undefined) { BoolValue.encode( { @@ -1919,6 +1952,45 @@ export const 
Postgresqlconfig121c = { writer.uint32(1026).fork() ).ldelim(); } + if (message.plantunerFixEmptyTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.plantunerFixEmptyTable!, + }, + writer.uint32(1194).fork() + ).ldelim(); + } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2329,6 +2401,12 @@ export const Postgresqlconfig121c = { reader.uint32() ).value; break; + case 79: + message.onlineAnalyzeEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; case 80: message.enableBitmapscan = BoolValue.decode( reader, @@ -2609,6 +2687,33 @@ export const Postgresqlconfig121c = { reader.uint32() ).value; break; + case 149: + message.plantunerFixEmptyTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -2981,6 +3086,11 @@ export const Postgresqlconfig121c = { object.trackActivityQuerySize !== null ? Number(object.trackActivityQuerySize) : undefined; + message.onlineAnalyzeEnable = + object.onlineAnalyzeEnable !== undefined && + object.onlineAnalyzeEnable !== null + ? Boolean(object.onlineAnalyzeEnable) + : undefined; message.enableBitmapscan = object.enableBitmapscan !== undefined && object.enableBitmapscan !== null ? Boolean(object.enableBitmapscan) @@ -3212,6 +3322,31 @@ export const Postgresqlconfig121c = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.plantunerFixEmptyTable = + object.plantunerFixEmptyTable !== undefined && + object.plantunerFixEmptyTable !== null + ? Boolean(object.plantunerFixEmptyTable) + : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? 
Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3390,6 +3525,8 @@ export const Postgresqlconfig121c = { (obj.archiveTimeout = message.archiveTimeout); message.trackActivityQuerySize !== undefined && (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.onlineAnalyzeEnable !== undefined && + (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); message.enableBitmapscan !== undefined && (obj.enableBitmapscan = message.enableBitmapscan); message.enableHashagg !== undefined && @@ -3500,6 +3637,15 @@ export const Postgresqlconfig121c = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.plantunerFixEmptyTable !== undefined && + (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3596,6 +3742,7 @@ export const Postgresqlconfig121c = { message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; message.archiveTimeout = object.archiveTimeout ?? undefined; message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; message.enableBitmapscan = object.enableBitmapscan ?? undefined; message.enableHashagg = object.enableHashagg ?? undefined; message.enableHashjoin = object.enableHashjoin ?? undefined; @@ -3660,6 +3807,12 @@ export const Postgresqlconfig121c = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts index 518a877b..680fefbd 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts @@ -182,105 +182,109 @@ export interface PostgresqlConfig13 { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + /** in bytes. 
*/ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } -export enum PostgresqlConfig13_WalLevel { - WAL_LEVEL_UNSPECIFIED = 0, - WAL_LEVEL_REPLICA = 1, - WAL_LEVEL_LOGICAL = 2, +export enum PostgresqlConfig13_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig13_WalLevelFromJSON( +export function postgresqlConfig13_BackslashQuoteFromJSON( object: any -): PostgresqlConfig13_WalLevel { +): PostgresqlConfig13_BackslashQuote { switch (object) { case 0: - case "WAL_LEVEL_UNSPECIFIED": - return PostgresqlConfig13_WalLevel.WAL_LEVEL_UNSPECIFIED; + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; case 1: - case "WAL_LEVEL_REPLICA": - return PostgresqlConfig13_WalLevel.WAL_LEVEL_REPLICA; + case "BACKSLASH_QUOTE": + return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE; case 2: - case "WAL_LEVEL_LOGICAL": - return PostgresqlConfig13_WalLevel.WAL_LEVEL_LOGICAL; + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_WalLevel.UNRECOGNIZED; + return PostgresqlConfig13_BackslashQuote.UNRECOGNIZED; } } -export function postgresqlConfig13_WalLevelToJSON( - object: PostgresqlConfig13_WalLevel +export function postgresqlConfig13_BackslashQuoteToJSON( + object: PostgresqlConfig13_BackslashQuote ): string { switch (object) { - case PostgresqlConfig13_WalLevel.WAL_LEVEL_UNSPECIFIED: - return "WAL_LEVEL_UNSPECIFIED"; - case PostgresqlConfig13_WalLevel.WAL_LEVEL_REPLICA: - return "WAL_LEVEL_REPLICA"; - case PostgresqlConfig13_WalLevel.WAL_LEVEL_LOGICAL: - return "WAL_LEVEL_LOGICAL"; + case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_SynchronousCommit { - SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, - SYNCHRONOUS_COMMIT_ON = 1, - SYNCHRONOUS_COMMIT_OFF = 2, - SYNCHRONOUS_COMMIT_LOCAL = 3, - SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, - SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, +export enum PostgresqlConfig13_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig13_SynchronousCommitFromJSON( +export function postgresqlConfig13_ByteaOutputFromJSON( object: any -): PostgresqlConfig13_SynchronousCommit { +): PostgresqlConfig13_ByteaOutput { switch (object) { case 0: - case 
"SYNCHRONOUS_COMMIT_UNSPECIFIED": - return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; case 1: - case "SYNCHRONOUS_COMMIT_ON": - return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_HEX; case 2: - case "SYNCHRONOUS_COMMIT_OFF": - return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; - case 3: - case "SYNCHRONOUS_COMMIT_LOCAL": - return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; - case 4: - case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": - return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; - case 5: - case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": - return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_SynchronousCommit.UNRECOGNIZED; + return PostgresqlConfig13_ByteaOutput.UNRECOGNIZED; } } -export function postgresqlConfig13_SynchronousCommitToJSON( - object: PostgresqlConfig13_SynchronousCommit +export function postgresqlConfig13_ByteaOutputToJSON( + object: PostgresqlConfig13_ByteaOutput ): string { switch (object) { - case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: - return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; - case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: - return "SYNCHRONOUS_COMMIT_ON"; - case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: - return "SYNCHRONOUS_COMMIT_OFF"; - case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: - return "SYNCHRONOUS_COMMIT_LOCAL"; - case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: - return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; - case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: - return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + case PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; default: return "UNKNOWN"; } @@ -382,6 +386,54 @@ export function postgresqlConfig13_ForceParallelModeToJSON( } } +export enum PostgresqlConfig13_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig13_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig13_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig13_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig13_LogErrorVerbosityToJSON( + object: PostgresqlConfig13_LogErrorVerbosity +): string { + switch (object) 
{ + case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlConfig13_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -478,54 +530,6 @@ export function postgresqlConfig13_LogLevelToJSON( } } -export enum PostgresqlConfig13_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig13_LogErrorVerbosityFromJSON( - object: any -): PostgresqlConfig13_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig13_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlConfig13_LogErrorVerbosityToJSON( - object: PostgresqlConfig13_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlConfig13_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlConfig13_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -580,391 +584,409 @@ export function postgresqlConfig13_LogStatementToJSON( } } -export enum PostgresqlConfig13_TransactionIsolation { - TRANSACTION_ISOLATION_UNSPECIFIED = 0, - TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, - TRANSACTION_ISOLATION_READ_COMMITTED = 2, - TRANSACTION_ISOLATION_REPEATABLE_READ = 3, - TRANSACTION_ISOLATION_SERIALIZABLE = 4, +export enum PostgresqlConfig13_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig13_TransactionIsolationFromJSON( +export function postgresqlConfig13_PgHintPlanDebugPrintFromJSON( object: any -): PostgresqlConfig13_TransactionIsolation { +): PostgresqlConfig13_PgHintPlanDebugPrint { switch (object) { case 0: - case "TRANSACTION_ISOLATION_UNSPECIFIED": - return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; case 1: - case 
"TRANSACTION_ISOLATION_READ_UNCOMMITTED": - return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; case 2: - case "TRANSACTION_ISOLATION_READ_COMMITTED": - return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; case 3: - case "TRANSACTION_ISOLATION_REPEATABLE_READ": - return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; case 4: - case "TRANSACTION_ISOLATION_SERIALIZABLE": - return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_TransactionIsolation.UNRECOGNIZED; + return PostgresqlConfig13_PgHintPlanDebugPrint.UNRECOGNIZED; } } -export function postgresqlConfig13_TransactionIsolationToJSON( - object: PostgresqlConfig13_TransactionIsolation +export function postgresqlConfig13_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig13_PgHintPlanDebugPrint ): string { switch (object) { - case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: - return "TRANSACTION_ISOLATION_UNSPECIFIED"; - case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: - return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; - case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: - return "TRANSACTION_ISOLATION_READ_COMMITTED"; - case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: - return "TRANSACTION_ISOLATION_REPEATABLE_READ"; - case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: - return "TRANSACTION_ISOLATION_SERIALIZABLE"; + case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, +export enum PostgresqlConfig13_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, UNRECOGNIZED = -1, } -export function postgresqlConfig13_ByteaOutputFromJSON( +export function postgresqlConfig13_PlanCacheModeFromJSON( object: any -): PostgresqlConfig13_ByteaOutput { +): PostgresqlConfig13_PlanCacheMode { switch (object) { case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case "PLAN_CACHE_MODE_UNSPECIFIED": + return 
PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_HEX; + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_AUTO; case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_ByteaOutput.UNRECOGNIZED; + return PostgresqlConfig13_PlanCacheMode.UNRECOGNIZED; } } -export function postgresqlConfig13_ByteaOutputToJSON( - object: PostgresqlConfig13_ByteaOutput +export function postgresqlConfig13_PlanCacheModeToJSON( + object: PostgresqlConfig13_PlanCacheMode ): string { switch (object) { - case PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlConfig13_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; + case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_XmlBinary { - XML_BINARY_UNSPECIFIED = 0, - XML_BINARY_BASE64 = 1, - XML_BINARY_HEX = 2, +export enum PostgresqlConfig13_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, UNRECOGNIZED = -1, } -export function postgresqlConfig13_XmlBinaryFromJSON( +export function postgresqlConfig13_SharedPreloadLibrariesFromJSON( object: any -): PostgresqlConfig13_XmlBinary { +): PostgresqlConfig13_SharedPreloadLibraries { switch (object) { case 0: - case "XML_BINARY_UNSPECIFIED": - return PostgresqlConfig13_XmlBinary.XML_BINARY_UNSPECIFIED; + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; case 1: - case "XML_BINARY_BASE64": - return PostgresqlConfig13_XmlBinary.XML_BINARY_BASE64; + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; case 2: - case "XML_BINARY_HEX": - return PostgresqlConfig13_XmlBinary.XML_BINARY_HEX; + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return 
PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_XmlBinary.UNRECOGNIZED; + return PostgresqlConfig13_SharedPreloadLibraries.UNRECOGNIZED; } } -export function postgresqlConfig13_XmlBinaryToJSON( - object: PostgresqlConfig13_XmlBinary +export function postgresqlConfig13_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig13_SharedPreloadLibraries ): string { switch (object) { - case PostgresqlConfig13_XmlBinary.XML_BINARY_UNSPECIFIED: - return "XML_BINARY_UNSPECIFIED"; - case PostgresqlConfig13_XmlBinary.XML_BINARY_BASE64: - return "XML_BINARY_BASE64"; - case PostgresqlConfig13_XmlBinary.XML_BINARY_HEX: - return "XML_BINARY_HEX"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_XmlOption { - XML_OPTION_UNSPECIFIED = 0, - XML_OPTION_DOCUMENT = 1, - XML_OPTION_CONTENT = 2, +export enum PostgresqlConfig13_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, UNRECOGNIZED = -1, } -export function postgresqlConfig13_XmlOptionFromJSON( +export function postgresqlConfig13_SynchronousCommitFromJSON( object: any -): PostgresqlConfig13_XmlOption { +): PostgresqlConfig13_SynchronousCommit { switch (object) { case 0: - case "XML_OPTION_UNSPECIFIED": - return PostgresqlConfig13_XmlOption.XML_OPTION_UNSPECIFIED; + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; case 1: - case "XML_OPTION_DOCUMENT": - return PostgresqlConfig13_XmlOption.XML_OPTION_DOCUMENT; + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; case 2: - case "XML_OPTION_CONTENT": - return PostgresqlConfig13_XmlOption.XML_OPTION_CONTENT; + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; case -1: case "UNRECOGNIZED": default: - return 
PostgresqlConfig13_XmlOption.UNRECOGNIZED; + return PostgresqlConfig13_SynchronousCommit.UNRECOGNIZED; } } -export function postgresqlConfig13_XmlOptionToJSON( - object: PostgresqlConfig13_XmlOption +export function postgresqlConfig13_SynchronousCommitToJSON( + object: PostgresqlConfig13_SynchronousCommit ): string { switch (object) { - case PostgresqlConfig13_XmlOption.XML_OPTION_UNSPECIFIED: - return "XML_OPTION_UNSPECIFIED"; - case PostgresqlConfig13_XmlOption.XML_OPTION_DOCUMENT: - return "XML_OPTION_DOCUMENT"; - case PostgresqlConfig13_XmlOption.XML_OPTION_CONTENT: - return "XML_OPTION_CONTENT"; + case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig13_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, +export enum PostgresqlConfig13_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig13_BackslashQuoteFromJSON( +export function postgresqlConfig13_TransactionIsolationFromJSON( object: any -): PostgresqlConfig13_BackslashQuote { +): PostgresqlConfig13_TransactionIsolation { switch (object) { case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; case 1: - case "BACKSLASH_QUOTE": - return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE; + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_ON; + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF; + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_BackslashQuote.UNRECOGNIZED; + return PostgresqlConfig13_TransactionIsolation.UNRECOGNIZED; } } -export function postgresqlConfig13_BackslashQuoteToJSON( - object: PostgresqlConfig13_BackslashQuote +export function 
postgresqlConfig13_TransactionIsolationToJSON( + object: PostgresqlConfig13_TransactionIsolation ): string { switch (object) { - case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlConfig13_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; + case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig13_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_PlanCacheMode { - PLAN_CACHE_MODE_UNSPECIFIED = 0, - PLAN_CACHE_MODE_AUTO = 1, - PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, - PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, +export enum PostgresqlConfig13_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig13_PlanCacheModeFromJSON( +export function postgresqlConfig13_WalLevelFromJSON( object: any -): PostgresqlConfig13_PlanCacheMode { +): PostgresqlConfig13_WalLevel { switch (object) { case 0: - case "PLAN_CACHE_MODE_UNSPECIFIED": - return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig13_WalLevel.WAL_LEVEL_UNSPECIFIED; case 1: - case "PLAN_CACHE_MODE_AUTO": - return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig13_WalLevel.WAL_LEVEL_REPLICA; case 2: - case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": - return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; - case 3: - case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": - return PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig13_WalLevel.WAL_LEVEL_LOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_PlanCacheMode.UNRECOGNIZED; + return PostgresqlConfig13_WalLevel.UNRECOGNIZED; } } -export function postgresqlConfig13_PlanCacheModeToJSON( - object: PostgresqlConfig13_PlanCacheMode +export function postgresqlConfig13_WalLevelToJSON( + object: PostgresqlConfig13_WalLevel ): string { switch (object) { - case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: - return "PLAN_CACHE_MODE_UNSPECIFIED"; - case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_AUTO: - return "PLAN_CACHE_MODE_AUTO"; - case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: - return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; - case PostgresqlConfig13_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: - return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + case PostgresqlConfig13_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case 
PostgresqlConfig13_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig13_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_PgHintPlanDebugPrint { - PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, - PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, - PG_HINT_PLAN_DEBUG_PRINT_ON = 2, - PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, - PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, +export enum PostgresqlConfig13_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig13_PgHintPlanDebugPrintFromJSON( +export function postgresqlConfig13_XmlBinaryFromJSON( object: any -): PostgresqlConfig13_PgHintPlanDebugPrint { +): PostgresqlConfig13_XmlBinary { switch (object) { case 0: - case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": - return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig13_XmlBinary.XML_BINARY_UNSPECIFIED; case 1: - case "PG_HINT_PLAN_DEBUG_PRINT_OFF": - return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case "XML_BINARY_BASE64": + return PostgresqlConfig13_XmlBinary.XML_BINARY_BASE64; case 2: - case "PG_HINT_PLAN_DEBUG_PRINT_ON": - return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; - case 3: - case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": - return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; - case 4: - case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": - return PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case "XML_BINARY_HEX": + return PostgresqlConfig13_XmlBinary.XML_BINARY_HEX; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_PgHintPlanDebugPrint.UNRECOGNIZED; + return PostgresqlConfig13_XmlBinary.UNRECOGNIZED; } } -export function postgresqlConfig13_PgHintPlanDebugPrintToJSON( - object: PostgresqlConfig13_PgHintPlanDebugPrint +export function postgresqlConfig13_XmlBinaryToJSON( + object: PostgresqlConfig13_XmlBinary ): string { switch (object) { - case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: - return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; - case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: - return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; - case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: - return "PG_HINT_PLAN_DEBUG_PRINT_ON"; - case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: - return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; - case PostgresqlConfig13_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: - return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + case PostgresqlConfig13_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlConfig13_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig13_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig13_SharedPreloadLibraries { - SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, - SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, - SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, - SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, - SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, +export enum PostgresqlConfig13_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig13_SharedPreloadLibrariesFromJSON( +export 
function postgresqlConfig13_XmlOptionFromJSON( object: any -): PostgresqlConfig13_SharedPreloadLibraries { +): PostgresqlConfig13_XmlOption { switch (object) { case 0: - case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": - return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig13_XmlOption.XML_OPTION_UNSPECIFIED; case 1: - case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": - return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig13_XmlOption.XML_OPTION_DOCUMENT; case 2: - case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": - return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; - case 3: - case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": - return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; - case 4: - case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": - return PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case "XML_OPTION_CONTENT": + return PostgresqlConfig13_XmlOption.XML_OPTION_CONTENT; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig13_SharedPreloadLibraries.UNRECOGNIZED; + return PostgresqlConfig13_XmlOption.UNRECOGNIZED; } } -export function postgresqlConfig13_SharedPreloadLibrariesToJSON( - object: PostgresqlConfig13_SharedPreloadLibraries +export function postgresqlConfig13_XmlOptionToJSON( + object: PostgresqlConfig13_XmlOption ): string { switch (object) { - case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: - return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; - case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: - return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; - case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: - return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; - case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: - return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; - case PostgresqlConfig13_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: - return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig13_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig13_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig13_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; default: return "UNKNOWN"; } @@ -2042,6 +2064,36 @@ export const PostgresqlConfig13 = { writer.uint32(1130).fork() ).ldelim(); } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! }, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! 
}, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2801,6 +2853,27 @@ export const PostgresqlConfig13 = { reader.uint32() ).value; break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -3457,6 +3530,26 @@ export const PostgresqlConfig13 = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3770,6 +3863,13 @@ export const PostgresqlConfig13 = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -3946,6 +4046,11 @@ export const PostgresqlConfig13 = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts new file mode 100644 index 00000000..57598ef0 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13_1c.ts @@ -0,0 +1,4258 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlconfig131c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13_1C"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. */ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: Postgresqlconfig131c_WalLevel; + synchronousCommit: Postgresqlconfig131c_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: Postgresqlconfig131c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: Postgresqlconfig131c_ForceParallelMode; + clientMinMessages: Postgresqlconfig131c_LogLevel; + logMinMessages: Postgresqlconfig131c_LogLevel; + logMinErrorStatement: Postgresqlconfig131c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlconfig131c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlconfig131c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlconfig131c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlconfig131c_ByteaOutput; + xmlbinary: Postgresqlconfig131c_XmlBinary; + xmloption: Postgresqlconfig131c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlconfig131c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + operatorPrecedenceWarning?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + onlineAnalyzeEnable?: boolean; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + vacuumCleanupIndexScaleFactor?: number; + logTransactionSampleRate?: number; + planCacheMode: Postgresqlconfig131c_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: Postgresqlconfig131c_SharedPreloadLibraries[]; + /** in milliseconds. */ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: Postgresqlconfig131c_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: Postgresqlconfig131c_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. 
*/ + logParameterMaxLengthOnError?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; + plantunerFixEmptyTable?: boolean; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; +} + +export enum Postgresqlconfig131c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_BackslashQuoteFromJSON( + object: any +): Postgresqlconfig131c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_BackslashQuoteToJSON( + object: Postgresqlconfig131c_BackslashQuote +): string { + switch (object) { + case Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlconfig131c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_ByteaOutputFromJSON( + object: any +): Postgresqlconfig131c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlconfig131c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlconfig131c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlconfig131c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_ByteaOutputToJSON( + object: Postgresqlconfig131c_ByteaOutput +): string { + switch (object) { + case Postgresqlconfig131c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlconfig131c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlconfig131c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_ConstraintExclusion { + 
CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_ConstraintExclusionFromJSON( + object: any +): Postgresqlconfig131c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_ConstraintExclusionToJSON( + object: Postgresqlconfig131c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlconfig131c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_ForceParallelModeFromJSON( + object: any +): Postgresqlconfig131c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_ForceParallelModeToJSON( + object: Postgresqlconfig131c_ForceParallelMode +): string { + switch (object) { + case Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlconfig131c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig131c_LogErrorVerbosity { + switch 
(object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_LogErrorVerbosityToJSON( + object: Postgresqlconfig131c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig131c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_LogLevelFromJSON( + object: any +): Postgresqlconfig131c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlconfig131c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_LogLevelToJSON( + object: Postgresqlconfig131c_LogLevel +): string { + switch (object) { + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG2: + return 
"LOG_LEVEL_DEBUG2"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlconfig131c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_LogStatementFromJSON( + object: any +): Postgresqlconfig131c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlconfig131c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlconfig131c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlconfig131c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlconfig131c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlconfig131c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_LogStatementToJSON( + object: Postgresqlconfig131c_LogStatement +): string { + switch (object) { + case Postgresqlconfig131c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlconfig131c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlconfig131c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlconfig131c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlconfig131c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_PgHintPlanDebugPrintFromJSON( + object: any +): Postgresqlconfig131c_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_PgHintPlanDebugPrintToJSON( + 
object: Postgresqlconfig131c_PgHintPlanDebugPrint +): string { + switch (object) { + case Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case Postgresqlconfig131c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_PlanCacheModeFromJSON( + object: any +): Postgresqlconfig131c_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_PlanCacheModeToJSON( + object: Postgresqlconfig131c_PlanCacheMode +): string { + switch (object) { + case Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case Postgresqlconfig131c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_SharedPreloadLibrariesFromJSON( + object: any +): Postgresqlconfig131c_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + 
return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_SharedPreloadLibrariesToJSON( + object: Postgresqlconfig131c_SharedPreloadLibraries +): string { + switch (object) { + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig131c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_SynchronousCommitFromJSON( + object: any +): Postgresqlconfig131c_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_SynchronousCommitToJSON( + object: Postgresqlconfig131c_SynchronousCommit +): string { + switch (object) { + case Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case 
Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case Postgresqlconfig131c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_TransactionIsolationFromJSON( + object: any +): Postgresqlconfig131c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_TransactionIsolationToJSON( + object: Postgresqlconfig131c_TransactionIsolation +): string { + switch (object) { + case Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlconfig131c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_WalLevelFromJSON( + object: any +): Postgresqlconfig131c_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return Postgresqlconfig131c_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return Postgresqlconfig131c_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return Postgresqlconfig131c_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_WalLevelToJSON( + object: Postgresqlconfig131c_WalLevel +): string { + switch (object) { + case Postgresqlconfig131c_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case Postgresqlconfig131c_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case Postgresqlconfig131c_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + 
+export enum Postgresqlconfig131c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_XmlBinaryFromJSON( + object: any +): Postgresqlconfig131c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlconfig131c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlconfig131c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlconfig131c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_XmlBinaryToJSON( + object: Postgresqlconfig131c_XmlBinary +): string { + switch (object) { + case Postgresqlconfig131c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlconfig131c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlconfig131c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig131c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig131c_XmlOptionFromJSON( + object: any +): Postgresqlconfig131c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlconfig131c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlconfig131c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlconfig131c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig131c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlconfig131c_XmlOptionToJSON( + object: Postgresqlconfig131c_XmlOption +): string { + switch (object) { + case Postgresqlconfig131c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlconfig131c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlconfig131c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export interface Postgresqlconfigset131c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13_1C"; + /** + * Effective settings for a PostgreSQL 13 1C cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Postgresqlconfig131c; + /** User-defined settings for a PostgreSQL 13 1C cluster. */ + userConfig?: Postgresqlconfig131c; + /** Default configuration for a PostgreSQL 13 1C cluster. 
*/ + defaultConfig?: Postgresqlconfig131c; +} + +const basePostgresqlconfig131c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13_1C", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const Postgresqlconfig131c = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig13_1C" as const, + + encode( + message: Postgresqlconfig131c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! 
}, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(264).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! 
}, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.operatorPrecedenceWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.operatorPrecedenceWarning!, + }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! }, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! 
}, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.onlineAnalyzeEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.onlineAnalyzeEnable!, + }, + writer.uint32(634).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! 
}, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if (message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! 
}, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.vacuumCleanupIndexScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.vacuumCleanupIndexScaleFactor!, + }, + writer.uint32(850).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: 
"google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + }, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! }, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.plantunerFixEmptyTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.plantunerFixEmptyTable!, + }, + writer.uint32(1194).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfig131c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlconfig131c } as Postgresqlconfig131c; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 
17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.forceParallelMode = reader.int32() as any; + break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + 
message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.operatorPrecedenceWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.onlineAnalyzeEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = 
BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 106: + message.vacuumCleanupIndexScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 149: + message.plantunerFixEmptyTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfig131c { + const message = { ...basePostgresqlconfig131c } as Postgresqlconfig131c; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? 
Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlconfig131c_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlconfig131c_SynchronousCommitFromJSON( + object.synchronousCommit + ) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? 
Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlconfig131c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlconfig131c_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlconfig131c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlconfig131c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlconfig131c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlconfig131c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlconfig131c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? 
Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlconfig131c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlconfig131c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlconfig131c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlconfig131c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlconfig131c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.operatorPrecedenceWarning = + object.operatorPrecedenceWarning !== undefined && + object.operatorPrecedenceWarning !== null + ? Boolean(object.operatorPrecedenceWarning) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? 
Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.onlineAnalyzeEnable = + object.onlineAnalyzeEnable !== undefined && + object.onlineAnalyzeEnable !== null + ? Boolean(object.onlineAnalyzeEnable) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? 
Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.vacuumCleanupIndexScaleFactor = + object.vacuumCleanupIndexScaleFactor !== undefined && + object.vacuumCleanupIndexScaleFactor !== null + ? Number(object.vacuumCleanupIndexScaleFactor) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlconfig131c_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? 
Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlconfig131c_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlconfig131c_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlconfig131c_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? 
Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + message.plantunerFixEmptyTable = + object.plantunerFixEmptyTable !== undefined && + object.plantunerFixEmptyTable !== null + ? Boolean(object.plantunerFixEmptyTable) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? 
Number(object.geqoSeed) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfig131c): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined && + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlconfig131c_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlconfig131c_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlconfig131c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = 
postgresqlconfig131c_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlconfig131c_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlconfig131c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlconfig131c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlconfig131c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlconfig131c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlconfig131c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlconfig131c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlconfig131c_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = postgresqlconfig131c_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlconfig131c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.operatorPrecedenceWarning !== undefined && + (obj.operatorPrecedenceWarning = 
message.operatorPrecedenceWarning); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && + (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.onlineAnalyzeEnable !== undefined && + (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== 
undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.vacuumCleanupIndexScaleFactor !== undefined && + (obj.vacuumCleanupIndexScaleFactor = + message.vacuumCleanupIndexScaleFactor); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlconfig131c_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlconfig131c_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = + postgresqlconfig131c_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlconfig131c_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + 
(obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + (obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.plantunerFixEmptyTable !== undefined && + (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfig131c { + const message = { ...basePostgresqlconfig131c } as Postgresqlconfig131c; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? 
undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.operatorPrecedenceWarning = + object.operatorPrecedenceWarning ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? 
undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.vacuumCleanupIndexScaleFactor = + object.vacuumCleanupIndexScaleFactor ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? 
undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfig131c.$type, Postgresqlconfig131c); + +const basePostgresqlconfigset131c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13_1C", +}; + +export const Postgresqlconfigset131c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet13_1C" as const, + + encode( + message: Postgresqlconfigset131c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Postgresqlconfig131c.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Postgresqlconfig131c.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Postgresqlconfig131c.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfigset131c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length;
+    const message = {
+      ...basePostgresqlconfigset131c,
+    } as Postgresqlconfigset131c;
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          message.effectiveConfig = Postgresqlconfig131c.decode(
+            reader,
+            reader.uint32()
+          );
+          break;
+        case 2:
+          message.userConfig = Postgresqlconfig131c.decode(
+            reader,
+            reader.uint32()
+          );
+          break;
+        case 3:
+          message.defaultConfig = Postgresqlconfig131c.decode(
+            reader,
+            reader.uint32()
+          );
+          break;
+        default:
+          reader.skipType(tag & 7);
+          break;
+      }
+    }
+    return message;
+  },
+
+  fromJSON(object: any): Postgresqlconfigset131c {
+    const message = {
+      ...basePostgresqlconfigset131c,
+    } as Postgresqlconfigset131c;
+    message.effectiveConfig =
+      object.effectiveConfig !== undefined && object.effectiveConfig !== null
+        ? Postgresqlconfig131c.fromJSON(object.effectiveConfig)
+        : undefined;
+    message.userConfig =
+      object.userConfig !== undefined && object.userConfig !== null
+        ? Postgresqlconfig131c.fromJSON(object.userConfig)
+        : undefined;
+    message.defaultConfig =
+      object.defaultConfig !== undefined && object.defaultConfig !== null
+        ? Postgresqlconfig131c.fromJSON(object.defaultConfig)
+        : undefined;
+    return message;
+  },
+
+  toJSON(message: Postgresqlconfigset131c): unknown {
+    const obj: any = {};
+    message.effectiveConfig !== undefined &&
+      (obj.effectiveConfig = message.effectiveConfig
+        ? Postgresqlconfig131c.toJSON(message.effectiveConfig)
+        : undefined);
+    message.userConfig !== undefined &&
+      (obj.userConfig = message.userConfig
+        ? Postgresqlconfig131c.toJSON(message.userConfig)
+        : undefined);
+    message.defaultConfig !== undefined &&
+      (obj.defaultConfig = message.defaultConfig
+        ? Postgresqlconfig131c.toJSON(message.defaultConfig)
+        : undefined);
+    return obj;
+  },
+
+  fromPartial<I extends Exact<DeepPartial<Postgresqlconfigset131c>, I>>(
+    object: I
+  ): Postgresqlconfigset131c {
+    const message = {
+      ...basePostgresqlconfigset131c,
+    } as Postgresqlconfigset131c;
+    message.effectiveConfig =
+      object.effectiveConfig !== undefined && object.effectiveConfig !== null
+        ? Postgresqlconfig131c.fromPartial(object.effectiveConfig)
+        : undefined;
+    message.userConfig =
+      object.userConfig !== undefined && object.userConfig !== null
+        ? Postgresqlconfig131c.fromPartial(object.userConfig)
+        : undefined;
+    message.defaultConfig =
+      object.defaultConfig !== undefined && object.defaultConfig !== null
+        ? Postgresqlconfig131c.fromPartial(object.defaultConfig)
+        : undefined;
+    return message;
+  },
+};
+
+messageTypeRegistry.set(Postgresqlconfigset131c.$type, Postgresqlconfigset131c);
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+export type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Array<infer U>
+  ? Array<DeepPartial<U>>
+  : T extends ReadonlyArray<infer U>
+  ? ReadonlyArray<DeepPartial<U>>
+  : T extends {}
+  ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+  : Partial<T>;
+
+type KeysOfUnion<T> = T extends T ? keyof T : never;
+export type Exact<P, I extends P> = P extends Builtin
+  ?
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts index de6d6a7b..90d6e092 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts @@ -191,105 +191,109 @@ export interface PostgresqlConfig14 { pgQualstatsMax?: number; pgQualstatsResolveOids?: boolean; pgQualstatsSampleRate?: number; + /** in bytes. */ + maxStackDepth?: number; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; } -export enum PostgresqlConfig14_WalLevel { - WAL_LEVEL_UNSPECIFIED = 0, - WAL_LEVEL_REPLICA = 1, - WAL_LEVEL_LOGICAL = 2, +export enum PostgresqlConfig14_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig14_WalLevelFromJSON( +export function postgresqlConfig14_BackslashQuoteFromJSON( object: any -): PostgresqlConfig14_WalLevel { +): PostgresqlConfig14_BackslashQuote { switch (object) { case 0: - case "WAL_LEVEL_UNSPECIFIED": - return PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED; + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; case 1: - case "WAL_LEVEL_REPLICA": - return PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA; + case "BACKSLASH_QUOTE": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE; case 2: - case "WAL_LEVEL_LOGICAL": - return PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL; + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_WalLevel.UNRECOGNIZED; + return PostgresqlConfig14_BackslashQuote.UNRECOGNIZED; } } -export function postgresqlConfig14_WalLevelToJSON( - object: PostgresqlConfig14_WalLevel +export function postgresqlConfig14_BackslashQuoteToJSON( + object: PostgresqlConfig14_BackslashQuote ): string { switch (object) { - case PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED: - return "WAL_LEVEL_UNSPECIFIED"; - case PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA: - return "WAL_LEVEL_REPLICA"; - case PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL: - return "WAL_LEVEL_LOGICAL"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; default: return 
"UNKNOWN"; } } -export enum PostgresqlConfig14_SynchronousCommit { - SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, - SYNCHRONOUS_COMMIT_ON = 1, - SYNCHRONOUS_COMMIT_OFF = 2, - SYNCHRONOUS_COMMIT_LOCAL = 3, - SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, - SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, +export enum PostgresqlConfig14_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig14_SynchronousCommitFromJSON( +export function postgresqlConfig14_ByteaOutputFromJSON( object: any -): PostgresqlConfig14_SynchronousCommit { +): PostgresqlConfig14_ByteaOutput { switch (object) { case 0: - case "SYNCHRONOUS_COMMIT_UNSPECIFIED": - return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case "BYTEA_OUTPUT_UNSPECIFIED": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; case 1: - case "SYNCHRONOUS_COMMIT_ON": - return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX; case 2: - case "SYNCHRONOUS_COMMIT_OFF": - return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; - case 3: - case "SYNCHRONOUS_COMMIT_LOCAL": - return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; - case 4: - case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": - return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; - case 5: - case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": - return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_SynchronousCommit.UNRECOGNIZED; + return PostgresqlConfig14_ByteaOutput.UNRECOGNIZED; } } -export function postgresqlConfig14_SynchronousCommitToJSON( - object: PostgresqlConfig14_SynchronousCommit +export function postgresqlConfig14_ByteaOutputToJSON( + object: PostgresqlConfig14_ByteaOutput ): string { switch (object) { - case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: - return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; - case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: - return "SYNCHRONOUS_COMMIT_ON"; - case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: - return "SYNCHRONOUS_COMMIT_OFF"; - case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: - return "SYNCHRONOUS_COMMIT_LOCAL"; - case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: - return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; - case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: - return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; default: return "UNKNOWN"; } @@ -391,6 +395,54 @@ export function postgresqlConfig14_ForceParallelModeToJSON( } } +export enum PostgresqlConfig14_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig14_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return 
PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogErrorVerbosityToJSON( + object: PostgresqlConfig14_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + export enum PostgresqlConfig14_LogLevel { LOG_LEVEL_UNSPECIFIED = 0, LOG_LEVEL_DEBUG5 = 1, @@ -487,54 +539,6 @@ export function postgresqlConfig14_LogLevelToJSON( } } -export enum PostgresqlConfig14_LogErrorVerbosity { - LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, - LOG_ERROR_VERBOSITY_TERSE = 1, - LOG_ERROR_VERBOSITY_DEFAULT = 2, - LOG_ERROR_VERBOSITY_VERBOSE = 3, - UNRECOGNIZED = -1, -} - -export function postgresqlConfig14_LogErrorVerbosityFromJSON( - object: any -): PostgresqlConfig14_LogErrorVerbosity { - switch (object) { - case 0: - case "LOG_ERROR_VERBOSITY_UNSPECIFIED": - return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; - case 1: - case "LOG_ERROR_VERBOSITY_TERSE": - return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; - case 2: - case "LOG_ERROR_VERBOSITY_DEFAULT": - return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; - case 3: - case "LOG_ERROR_VERBOSITY_VERBOSE": - return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; - case -1: - case "UNRECOGNIZED": - default: - return PostgresqlConfig14_LogErrorVerbosity.UNRECOGNIZED; - } -} - -export function postgresqlConfig14_LogErrorVerbosityToJSON( - object: PostgresqlConfig14_LogErrorVerbosity -): string { - switch (object) { - case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: - return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; - case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: - return "LOG_ERROR_VERBOSITY_TERSE"; - case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: - return "LOG_ERROR_VERBOSITY_DEFAULT"; - case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: - return "LOG_ERROR_VERBOSITY_VERBOSE"; - default: - return "UNKNOWN"; - } -} - export enum PostgresqlConfig14_LogStatement { LOG_STATEMENT_UNSPECIFIED = 0, LOG_STATEMENT_NONE = 1, @@ -589,391 +593,409 @@ export function postgresqlConfig14_LogStatementToJSON( } } -export enum PostgresqlConfig14_TransactionIsolation { - TRANSACTION_ISOLATION_UNSPECIFIED = 0, - TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, - TRANSACTION_ISOLATION_READ_COMMITTED = 2, - TRANSACTION_ISOLATION_REPEATABLE_READ = 3, - TRANSACTION_ISOLATION_SERIALIZABLE = 4, +export enum PostgresqlConfig14_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + 
PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig14_TransactionIsolationFromJSON( +export function postgresqlConfig14_PgHintPlanDebugPrintFromJSON( object: any -): PostgresqlConfig14_TransactionIsolation { +): PostgresqlConfig14_PgHintPlanDebugPrint { switch (object) { case 0: - case "TRANSACTION_ISOLATION_UNSPECIFIED": - return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; case 1: - case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": - return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; case 2: - case "TRANSACTION_ISOLATION_READ_COMMITTED": - return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; case 3: - case "TRANSACTION_ISOLATION_REPEATABLE_READ": - return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; case 4: - case "TRANSACTION_ISOLATION_SERIALIZABLE": - return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_TransactionIsolation.UNRECOGNIZED; + return PostgresqlConfig14_PgHintPlanDebugPrint.UNRECOGNIZED; } } -export function postgresqlConfig14_TransactionIsolationToJSON( - object: PostgresqlConfig14_TransactionIsolation +export function postgresqlConfig14_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig14_PgHintPlanDebugPrint ): string { switch (object) { - case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: - return "TRANSACTION_ISOLATION_UNSPECIFIED"; - case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: - return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; - case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: - return "TRANSACTION_ISOLATION_READ_COMMITTED"; - case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: - return "TRANSACTION_ISOLATION_REPEATABLE_READ"; - case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: - return "TRANSACTION_ISOLATION_SERIALIZABLE"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_ByteaOutput { - BYTEA_OUTPUT_UNSPECIFIED = 0, - 
BYTEA_OUTPUT_HEX = 1, - BYTEA_OUTPUT_ESCAPED = 2, +export enum PostgresqlConfig14_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, UNRECOGNIZED = -1, } -export function postgresqlConfig14_ByteaOutputFromJSON( +export function postgresqlConfig14_PlanCacheModeFromJSON( object: any -): PostgresqlConfig14_ByteaOutput { +): PostgresqlConfig14_PlanCacheMode { switch (object) { case 0: - case "BYTEA_OUTPUT_UNSPECIFIED": - return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case "PLAN_CACHE_MODE_UNSPECIFIED": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; case 1: - case "BYTEA_OUTPUT_HEX": - return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX; + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO; case 2: - case "BYTEA_OUTPUT_ESCAPED": - return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_ByteaOutput.UNRECOGNIZED; + return PostgresqlConfig14_PlanCacheMode.UNRECOGNIZED; } } -export function postgresqlConfig14_ByteaOutputToJSON( - object: PostgresqlConfig14_ByteaOutput +export function postgresqlConfig14_PlanCacheModeToJSON( + object: PostgresqlConfig14_PlanCacheMode ): string { switch (object) { - case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: - return "BYTEA_OUTPUT_UNSPECIFIED"; - case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX: - return "BYTEA_OUTPUT_HEX"; - case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED: - return "BYTEA_OUTPUT_ESCAPED"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_XmlBinary { - XML_BINARY_UNSPECIFIED = 0, - XML_BINARY_BASE64 = 1, - XML_BINARY_HEX = 2, +export enum PostgresqlConfig14_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, UNRECOGNIZED = -1, } -export function postgresqlConfig14_XmlBinaryFromJSON( +export function postgresqlConfig14_SharedPreloadLibrariesFromJSON( object: any -): PostgresqlConfig14_XmlBinary { +): PostgresqlConfig14_SharedPreloadLibraries { switch (object) { case 0: - case "XML_BINARY_UNSPECIFIED": - return PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED; + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; case 1: - case "XML_BINARY_BASE64": - return PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64; + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return 
PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; case 2: - case "XML_BINARY_HEX": - return PostgresqlConfig14_XmlBinary.XML_BINARY_HEX; + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_XmlBinary.UNRECOGNIZED; + return PostgresqlConfig14_SharedPreloadLibraries.UNRECOGNIZED; } } -export function postgresqlConfig14_XmlBinaryToJSON( - object: PostgresqlConfig14_XmlBinary +export function postgresqlConfig14_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig14_SharedPreloadLibraries ): string { switch (object) { - case PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED: - return "XML_BINARY_UNSPECIFIED"; - case PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64: - return "XML_BINARY_BASE64"; - case PostgresqlConfig14_XmlBinary.XML_BINARY_HEX: - return "XML_BINARY_HEX"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_XmlOption { - XML_OPTION_UNSPECIFIED = 0, - XML_OPTION_DOCUMENT = 1, - XML_OPTION_CONTENT = 2, +export enum PostgresqlConfig14_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, UNRECOGNIZED = -1, } -export function postgresqlConfig14_XmlOptionFromJSON( +export function postgresqlConfig14_SynchronousCommitFromJSON( object: any -): PostgresqlConfig14_XmlOption { +): PostgresqlConfig14_SynchronousCommit { switch (object) { case 0: - case "XML_OPTION_UNSPECIFIED": - return PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED; + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; case 1: - case "XML_OPTION_DOCUMENT": - return PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT; + case "SYNCHRONOUS_COMMIT_ON": + return 
PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; case 2: - case "XML_OPTION_CONTENT": - return PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT; + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_XmlOption.UNRECOGNIZED; + return PostgresqlConfig14_SynchronousCommit.UNRECOGNIZED; } } -export function postgresqlConfig14_XmlOptionToJSON( - object: PostgresqlConfig14_XmlOption +export function postgresqlConfig14_SynchronousCommitToJSON( + object: PostgresqlConfig14_SynchronousCommit ): string { switch (object) { - case PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED: - return "XML_OPTION_UNSPECIFIED"; - case PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT: - return "XML_OPTION_DOCUMENT"; - case PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT: - return "XML_OPTION_CONTENT"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_BackslashQuote { - BACKSLASH_QUOTE_UNSPECIFIED = 0, - BACKSLASH_QUOTE = 1, - BACKSLASH_QUOTE_ON = 2, - BACKSLASH_QUOTE_OFF = 3, - BACKSLASH_QUOTE_SAFE_ENCODING = 4, +export enum PostgresqlConfig14_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, UNRECOGNIZED = -1, } -export function postgresqlConfig14_BackslashQuoteFromJSON( +export function postgresqlConfig14_TransactionIsolationFromJSON( object: any -): PostgresqlConfig14_BackslashQuote { +): PostgresqlConfig14_TransactionIsolation { switch (object) { case 0: - case "BACKSLASH_QUOTE_UNSPECIFIED": - return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; case 1: - case "BACKSLASH_QUOTE": - return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE; + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; case 2: - case "BACKSLASH_QUOTE_ON": - return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON; + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; case 3: - case "BACKSLASH_QUOTE_OFF": - return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 
"TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; case 4: - case "BACKSLASH_QUOTE_SAFE_ENCODING": - return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_BackslashQuote.UNRECOGNIZED; + return PostgresqlConfig14_TransactionIsolation.UNRECOGNIZED; } } -export function postgresqlConfig14_BackslashQuoteToJSON( - object: PostgresqlConfig14_BackslashQuote +export function postgresqlConfig14_TransactionIsolationToJSON( + object: PostgresqlConfig14_TransactionIsolation ): string { switch (object) { - case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: - return "BACKSLASH_QUOTE_UNSPECIFIED"; - case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE: - return "BACKSLASH_QUOTE"; - case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON: - return "BACKSLASH_QUOTE_ON"; - case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF: - return "BACKSLASH_QUOTE_OFF"; - case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: - return "BACKSLASH_QUOTE_SAFE_ENCODING"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_PlanCacheMode { - PLAN_CACHE_MODE_UNSPECIFIED = 0, - PLAN_CACHE_MODE_AUTO = 1, - PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, - PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, +export enum PostgresqlConfig14_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig14_PlanCacheModeFromJSON( +export function postgresqlConfig14_WalLevelFromJSON( object: any -): PostgresqlConfig14_PlanCacheMode { +): PostgresqlConfig14_WalLevel { switch (object) { case 0: - case "PLAN_CACHE_MODE_UNSPECIFIED": - return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED; case 1: - case "PLAN_CACHE_MODE_AUTO": - return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA; case 2: - case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": - return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; - case 3: - case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": - return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_PlanCacheMode.UNRECOGNIZED; + return PostgresqlConfig14_WalLevel.UNRECOGNIZED; } } -export function postgresqlConfig14_PlanCacheModeToJSON( - object: PostgresqlConfig14_PlanCacheMode +export function 
postgresqlConfig14_WalLevelToJSON( + object: PostgresqlConfig14_WalLevel ): string { switch (object) { - case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: - return "PLAN_CACHE_MODE_UNSPECIFIED"; - case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO: - return "PLAN_CACHE_MODE_AUTO"; - case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: - return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; - case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: - return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_PgHintPlanDebugPrint { - PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, - PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, - PG_HINT_PLAN_DEBUG_PRINT_ON = 2, - PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, - PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, +export enum PostgresqlConfig14_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig14_PgHintPlanDebugPrintFromJSON( +export function postgresqlConfig14_XmlBinaryFromJSON( object: any -): PostgresqlConfig14_PgHintPlanDebugPrint { +): PostgresqlConfig14_XmlBinary { switch (object) { case 0: - case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": - return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED; case 1: - case "PG_HINT_PLAN_DEBUG_PRINT_OFF": - return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case "XML_BINARY_BASE64": + return PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64; case 2: - case "PG_HINT_PLAN_DEBUG_PRINT_ON": - return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; - case 3: - case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": - return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; - case 4: - case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": - return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case "XML_BINARY_HEX": + return PostgresqlConfig14_XmlBinary.XML_BINARY_HEX; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_PgHintPlanDebugPrint.UNRECOGNIZED; + return PostgresqlConfig14_XmlBinary.UNRECOGNIZED; } } -export function postgresqlConfig14_PgHintPlanDebugPrintToJSON( - object: PostgresqlConfig14_PgHintPlanDebugPrint +export function postgresqlConfig14_XmlBinaryToJSON( + object: PostgresqlConfig14_XmlBinary ): string { switch (object) { - case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: - return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; - case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: - return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; - case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: - return "PG_HINT_PLAN_DEBUG_PRINT_ON"; - case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: - return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; - case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: - return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + case PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case 
PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig14_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; default: return "UNKNOWN"; } } -export enum PostgresqlConfig14_SharedPreloadLibraries { - SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, - SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, - SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, - SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, - SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, +export enum PostgresqlConfig14_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, UNRECOGNIZED = -1, } -export function postgresqlConfig14_SharedPreloadLibrariesFromJSON( +export function postgresqlConfig14_XmlOptionFromJSON( object: any -): PostgresqlConfig14_SharedPreloadLibraries { +): PostgresqlConfig14_XmlOption { switch (object) { case 0: - case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": - return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED; case 1: - case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": - return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT; case 2: - case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": - return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; - case 3: - case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": - return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; - case 4: - case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": - return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case "XML_OPTION_CONTENT": + return PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT; case -1: case "UNRECOGNIZED": default: - return PostgresqlConfig14_SharedPreloadLibraries.UNRECOGNIZED; + return PostgresqlConfig14_XmlOption.UNRECOGNIZED; } } -export function postgresqlConfig14_SharedPreloadLibrariesToJSON( - object: PostgresqlConfig14_SharedPreloadLibraries +export function postgresqlConfig14_XmlOptionToJSON( + object: PostgresqlConfig14_XmlOption ): string { switch (object) { - case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: - return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; - case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: - return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; - case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: - return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; - case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: - return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; - case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: - return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; default: return "UNKNOWN"; } @@ -2093,6 +2115,36 @@ export const PostgresqlConfig14 = { writer.uint32(1186).fork() ).ldelim(); } + if (message.maxStackDepth !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxStackDepth! 
}, + writer.uint32(1202).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } return writer; }, @@ -2882,6 +2934,27 @@ export const PostgresqlConfig14 = { reader.uint32() ).value; break; + case 150: + message.maxStackDepth = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; default: reader.skipType(tag & 7); break; @@ -3562,6 +3635,26 @@ export const PostgresqlConfig14 = { object.pgQualstatsSampleRate !== null ? Number(object.pgQualstatsSampleRate) : undefined; + message.maxStackDepth = + object.maxStackDepth !== undefined && object.maxStackDepth !== null + ? Number(object.maxStackDepth) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? Number(object.geqoSeed) + : undefined; return message; }, @@ -3885,6 +3978,13 @@ export const PostgresqlConfig14 = { (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); message.pgQualstatsSampleRate !== undefined && (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.maxStackDepth !== undefined && + (obj.maxStackDepth = message.maxStackDepth); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); return obj; }, @@ -4067,6 +4167,11 @@ export const PostgresqlConfig14 = { message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.maxStackDepth = object.maxStackDepth ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts new file mode 100644 index 00000000..92d5bb8d --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14_1c.ts @@ -0,0 +1,4379 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface Postgresqlconfig141c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14_1C"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. */ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: Postgresqlconfig141c_WalLevel; + synchronousCommit: Postgresqlconfig141c_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. */ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: Postgresqlconfig141c_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: Postgresqlconfig141c_ForceParallelMode; + clientMinMessages: Postgresqlconfig141c_LogLevel; + logMinMessages: Postgresqlconfig141c_LogLevel; + logMinErrorStatement: Postgresqlconfig141c_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: Postgresqlconfig141c_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: Postgresqlconfig141c_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: Postgresqlconfig141c_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: Postgresqlconfig141c_ByteaOutput; + xmlbinary: Postgresqlconfig141c_XmlBinary; + xmloption: Postgresqlconfig141c_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: Postgresqlconfig141c_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. */ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + onlineAnalyzeEnable?: boolean; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: Postgresqlconfig141c_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: Postgresqlconfig141c_SharedPreloadLibraries[]; + /** in milliseconds. */ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: Postgresqlconfig141c_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: Postgresqlconfig141c_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. */ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. 
*/ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; + plantunerFixEmptyTable?: boolean; + /** enable Genetic Query Optimizer, by default is on */ + geqo?: boolean; + /** The number of tables to use geqo, default is 12 */ + geqoThreshold?: number; + /** tradeoff between planning time and query plan quality, default is 5 */ + geqoEffort?: number; + /** initial value of the random number generator used by GEQO */ + geqoSeed?: number; +} + +export enum Postgresqlconfig141c_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_BackslashQuoteFromJSON( + object: any +): Postgresqlconfig141c_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_BackslashQuoteToJSON( + object: Postgresqlconfig141c_BackslashQuote +): string { + switch (object) { + case Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case Postgresqlconfig141c_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_ByteaOutputFromJSON( + object: any +): Postgresqlconfig141c_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return Postgresqlconfig141c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return Postgresqlconfig141c_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return Postgresqlconfig141c_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_ByteaOutputToJSON( + object: Postgresqlconfig141c_ByteaOutput +): string { + switch (object) { + case Postgresqlconfig141c_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case Postgresqlconfig141c_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case Postgresqlconfig141c_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_ConstraintExclusion { + 
CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_ConstraintExclusionFromJSON( + object: any +): Postgresqlconfig141c_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_ConstraintExclusionToJSON( + object: Postgresqlconfig141c_ConstraintExclusion +): string { + switch (object) { + case Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case Postgresqlconfig141c_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_ForceParallelModeFromJSON( + object: any +): Postgresqlconfig141c_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_ForceParallelModeToJSON( + object: Postgresqlconfig141c_ForceParallelMode +): string { + switch (object) { + case Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case Postgresqlconfig141c_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_LogErrorVerbosityFromJSON( + object: any +): Postgresqlconfig141c_LogErrorVerbosity { + switch 
(object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_LogErrorVerbosityToJSON( + object: Postgresqlconfig141c_LogErrorVerbosity +): string { + switch (object) { + case Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case Postgresqlconfig141c_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_LogLevelFromJSON( + object: any +): Postgresqlconfig141c_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return Postgresqlconfig141c_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_LogLevelToJSON( + object: Postgresqlconfig141c_LogLevel +): string { + switch (object) { + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG2: + return 
"LOG_LEVEL_DEBUG2"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case Postgresqlconfig141c_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_LogStatementFromJSON( + object: any +): Postgresqlconfig141c_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return Postgresqlconfig141c_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return Postgresqlconfig141c_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return Postgresqlconfig141c_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return Postgresqlconfig141c_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return Postgresqlconfig141c_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_LogStatementToJSON( + object: Postgresqlconfig141c_LogStatement +): string { + switch (object) { + case Postgresqlconfig141c_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case Postgresqlconfig141c_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case Postgresqlconfig141c_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case Postgresqlconfig141c_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case Postgresqlconfig141c_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_PgHintPlanDebugPrintFromJSON( + object: any +): Postgresqlconfig141c_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_PgHintPlanDebugPrintToJSON( + 
object: Postgresqlconfig141c_PgHintPlanDebugPrint +): string { + switch (object) { + case Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case Postgresqlconfig141c_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_PlanCacheModeFromJSON( + object: any +): Postgresqlconfig141c_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_PlanCacheModeToJSON( + object: Postgresqlconfig141c_PlanCacheMode +): string { + switch (object) { + case Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return "PLAN_CACHE_MODE_AUTO"; + case Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case Postgresqlconfig141c_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + SHARED_PRELOAD_LIBRARIES_PG_CRON = 5, + SHARED_PRELOAD_LIBRARIES_PGLOGICAL = 6, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_SharedPreloadLibrariesFromJSON( + object: any +): Postgresqlconfig141c_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + 
return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case 5: + case "SHARED_PRELOAD_LIBRARIES_PG_CRON": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON; + case 6: + case "SHARED_PRELOAD_LIBRARIES_PGLOGICAL": + return Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_SharedPreloadLibrariesToJSON( + object: Postgresqlconfig141c_SharedPreloadLibraries +): string { + switch (object) { + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_CRON: + return "SHARED_PRELOAD_LIBRARIES_PG_CRON"; + case Postgresqlconfig141c_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PGLOGICAL: + return "SHARED_PRELOAD_LIBRARIES_PGLOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_SynchronousCommitFromJSON( + object: any +): Postgresqlconfig141c_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_SynchronousCommitToJSON( + object: Postgresqlconfig141c_SynchronousCommit +): string { + switch (object) { + case Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case 
Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case Postgresqlconfig141c_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_TransactionIsolationFromJSON( + object: any +): Postgresqlconfig141c_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_TransactionIsolationToJSON( + object: Postgresqlconfig141c_TransactionIsolation +): string { + switch (object) { + case Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case Postgresqlconfig141c_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_WalLevelFromJSON( + object: any +): Postgresqlconfig141c_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return Postgresqlconfig141c_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return Postgresqlconfig141c_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return Postgresqlconfig141c_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_WalLevelToJSON( + object: Postgresqlconfig141c_WalLevel +): string { + switch (object) { + case Postgresqlconfig141c_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case Postgresqlconfig141c_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case Postgresqlconfig141c_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + 
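For orientation, the paired *FromJSON/*ToJSON helpers generated above are pure lookup functions: FromJSON accepts either the numeric enum value or its string name and falls back to UNRECOGNIZED for anything else, while ToJSON maps known members back to their names and everything else to "UNKNOWN". A minimal usage sketch follows; the import path is an assumption and should be adjusted to wherever this generated module is exported from in the SDK.

import {
  Postgresqlconfig141c_WalLevel,
  postgresqlconfig141c_WalLevelFromJSON,
  postgresqlconfig141c_WalLevelToJSON,
} from "./postgresql14_1c"; // hypothetical path to this generated module

// String names and raw numbers both resolve to the same enum member.
const level = postgresqlconfig141c_WalLevelFromJSON("WAL_LEVEL_LOGICAL");
console.log(level === Postgresqlconfig141c_WalLevel.WAL_LEVEL_LOGICAL); // true (value 2)
console.log(postgresqlconfig141c_WalLevelToJSON(level)); // "WAL_LEVEL_LOGICAL"

// Unknown input never throws; it maps to the UNRECOGNIZED sentinel (-1),
// which ToJSON then renders as "UNKNOWN".
console.log(postgresqlconfig141c_WalLevelFromJSON("no-such-level")); // -1
console.log(
  postgresqlconfig141c_WalLevelToJSON(Postgresqlconfig141c_WalLevel.UNRECOGNIZED)
); // "UNKNOWN"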
+export enum Postgresqlconfig141c_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_XmlBinaryFromJSON( + object: any +): Postgresqlconfig141c_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return Postgresqlconfig141c_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return Postgresqlconfig141c_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return Postgresqlconfig141c_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_XmlBinaryToJSON( + object: Postgresqlconfig141c_XmlBinary +): string { + switch (object) { + case Postgresqlconfig141c_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case Postgresqlconfig141c_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case Postgresqlconfig141c_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum Postgresqlconfig141c_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlconfig141c_XmlOptionFromJSON( + object: any +): Postgresqlconfig141c_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return Postgresqlconfig141c_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return Postgresqlconfig141c_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return Postgresqlconfig141c_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return Postgresqlconfig141c_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlconfig141c_XmlOptionToJSON( + object: Postgresqlconfig141c_XmlOption +): string { + switch (object) { + case Postgresqlconfig141c_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case Postgresqlconfig141c_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case Postgresqlconfig141c_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export interface Postgresqlconfigset141c { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14_1C"; + /** + * Effective settings for a PostgreSQL 14 1C cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Postgresqlconfig141c; + /** User-defined settings for a PostgreSQL 14 1C cluster. */ + userConfig?: Postgresqlconfig141c; + /** Default configuration for a PostgreSQL 14 1C cluster. 
*/ + defaultConfig?: Postgresqlconfig141c; +} + +const basePostgresqlconfig141c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14_1C", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const Postgresqlconfig141c = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14_1C" as const, + + encode( + message: Postgresqlconfig141c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! 
}, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(264).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! 
}, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! }, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! 
}, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.onlineAnalyzeEnable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.onlineAnalyzeEnable!, + }, + writer.uint32(634).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! 
}, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if (message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! 
}, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + }, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + 
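+    // Each writer.uint32(n) call in encode() writes the protobuf field tag, where
+    // n = (fieldNumber << 3) | wireType: 976 above is field 122
+    // (pgHintPlanDebugPrint, varint wire type 0) and 984 below is field 123.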
if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! }, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! 
}, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + if (message.plantunerFixEmptyTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.plantunerFixEmptyTable!, + }, + writer.uint32(1194).fork() + ).ldelim(); + } + if (message.geqo !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.geqo! }, + writer.uint32(1218).fork() + ).ldelim(); + } + if (message.geqoThreshold !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoThreshold! }, + writer.uint32(1226).fork() + ).ldelim(); + } + if (message.geqoEffort !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.geqoEffort! }, + writer.uint32(1234).fork() + ).ldelim(); + } + if (message.geqoSeed !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.geqoSeed! }, + writer.uint32(1266).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfig141c { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePostgresqlconfig141c } as Postgresqlconfig141c; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: 
+ message.forceParallelMode = reader.int32() as any; + break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 79: + message.onlineAnalyzeEnable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + 
reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 149: + message.plantunerFixEmptyTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 152: + message.geqo = BoolValue.decode(reader, reader.uint32()).value; + break; + case 153: + message.geqoThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 154: + message.geqoEffort = Int64Value.decode(reader, reader.uint32()).value; + break; + case 158: + message.geqoSeed = DoubleValue.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfig141c { + const message = { ...basePostgresqlconfig141c } as Postgresqlconfig141c; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? 
Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlconfig141c_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlconfig141c_SynchronousCommitFromJSON( + object.synchronousCommit + ) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlconfig141c_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? 
Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlconfig141c_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlconfig141c_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlconfig141c_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlconfig141c_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlconfig141c_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlconfig141c_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlconfig141c_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlconfig141c_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? 
postgresqlconfig141c_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlconfig141c_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlconfig141c_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? 
Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.onlineAnalyzeEnable = + object.onlineAnalyzeEnable !== undefined && + object.onlineAnalyzeEnable !== null + ? Boolean(object.onlineAnalyzeEnable) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? 
String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlconfig141c_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlconfig141c_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? 
Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlconfig141c_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlconfig141c_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? 
Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; + message.plantunerFixEmptyTable = + object.plantunerFixEmptyTable !== undefined && + object.plantunerFixEmptyTable !== null + ? Boolean(object.plantunerFixEmptyTable) + : undefined; + message.geqo = + object.geqo !== undefined && object.geqo !== null + ? Boolean(object.geqo) + : undefined; + message.geqoThreshold = + object.geqoThreshold !== undefined && object.geqoThreshold !== null + ? Number(object.geqoThreshold) + : undefined; + message.geqoEffort = + object.geqoEffort !== undefined && object.geqoEffort !== null + ? Number(object.geqoEffort) + : undefined; + message.geqoSeed = + object.geqoSeed !== undefined && object.geqoSeed !== null + ? 
Number(object.geqoSeed) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfig141c): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined && + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlconfig141c_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlconfig141c_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlconfig141c_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = 
postgresqlconfig141c_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlconfig141c_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlconfig141c_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlconfig141c_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlconfig141c_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlconfig141c_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlconfig141c_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlconfig141c_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlconfig141c_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = postgresqlconfig141c_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlconfig141c_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + 
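// Each guard emitted above follows the same ts-proto shape, `message.<field> !== undefined &&
// (obj.<field> = ...)`, so only settings that were explicitly set are serialized, and
// enum-typed settings are converted through their *ToJSON helpers. A minimal round-trip
// sketch using only the helpers defined in this file; the field values are illustrative,
// not defaults taken from this patch:
//
//   const cfg = Postgresqlconfig141c.fromPartial({
//     maxConnections: 200,
//     jit: true,
//   });
//   const asJson = Postgresqlconfig141c.toJSON(cfg);        // numeric/boolean settings appear only if set;
//                                                           // enum and string settings always appear with their defaults
//   const restored = Postgresqlconfig141c.fromJSON(asJson); // rebuilds an equivalent message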
message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && + (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.onlineAnalyzeEnable !== undefined && + (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + 
(obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlconfig141c_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlconfig141c_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = + postgresqlconfig141c_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlconfig141c_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + (obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + 
(obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); + message.plantunerFixEmptyTable !== undefined && + (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.geqo !== undefined && (obj.geqo = message.geqo); + message.geqoThreshold !== undefined && + (obj.geqoThreshold = message.geqoThreshold); + message.geqoEffort !== undefined && (obj.geqoEffort = message.geqoEffort); + message.geqoSeed !== undefined && (obj.geqoSeed = message.geqoSeed); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfig141c { + const message = { ...basePostgresqlconfig141c } as Postgresqlconfig141c; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? 
undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? 
undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? 
undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.geqo = object.geqo ?? undefined; + message.geqoThreshold = object.geqoThreshold ?? undefined; + message.geqoEffort = object.geqoEffort ?? undefined; + message.geqoSeed = object.geqoSeed ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfig141c.$type, Postgresqlconfig141c); + +const basePostgresqlconfigset141c: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14_1C", +}; + +export const Postgresqlconfigset141c = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14_1C" as const, + + encode( + message: Postgresqlconfigset141c, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Postgresqlconfig141c.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Postgresqlconfig141c.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Postgresqlconfig141c.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Postgresqlconfigset141c { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...basePostgresqlconfigset141c, + } as Postgresqlconfigset141c; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Postgresqlconfig141c.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Postgresqlconfig141c.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Postgresqlconfig141c.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Postgresqlconfigset141c { + const message = { + ...basePostgresqlconfigset141c, + } as Postgresqlconfigset141c; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Postgresqlconfig141c.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Postgresqlconfig141c.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Postgresqlconfig141c.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Postgresqlconfigset141c): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Postgresqlconfig141c.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Postgresqlconfig141c.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Postgresqlconfig141c.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Postgresqlconfigset141c { + const message = { + ...basePostgresqlconfigset141c, + } as Postgresqlconfigset141c; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Postgresqlconfig141c.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Postgresqlconfig141c.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Postgresqlconfig141c.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Postgresqlconfigset141c.$type, Postgresqlconfigset141c); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts index e8351702..0d6d7bc7 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/database.ts @@ -29,6 +29,8 @@ export interface Database { lcCtype: string; /** PostgreSQL extensions enabled for the database. */ extensions: Extension[]; + /** Name of the database template. */ + templateDb: string; } export interface Extension { @@ -63,6 +65,8 @@ export interface DatabaseSpec { lcCtype: string; /** PostgreSQL extensions to be enabled for the database. */ extensions: Extension[]; + /** Name of the PostgreSQL database template. */ + templateDb: string; } const baseDatabase: object = { @@ -72,6 +76,7 @@ const baseDatabase: object = { owner: "", lcCollate: "", lcCtype: "", + templateDb: "", }; export const Database = { @@ -99,6 +104,9 @@ export const Database = { for (const v of message.extensions) { Extension.encode(v!, writer.uint32(50).fork()).ldelim(); } + if (message.templateDb !== "") { + writer.uint32(58).string(message.templateDb); + } return writer; }, @@ -128,6 +136,9 @@ export const Database = { case 6: message.extensions.push(Extension.decode(reader, reader.uint32())); break; + case 7: + message.templateDb = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -161,6 +172,10 @@ export const Database = { message.extensions = (object.extensions ?? []).map((e: any) => Extension.fromJSON(e) ); + message.templateDb = + object.templateDb !== undefined && object.templateDb !== null + ? String(object.templateDb) + : ""; return message; }, @@ -178,6 +193,7 @@ export const Database = { } else { obj.extensions = []; } + message.templateDb !== undefined && (obj.templateDb = message.templateDb); return obj; }, @@ -190,6 +206,7 @@ export const Database = { message.lcCtype = object.lcCtype ?? ""; message.extensions = object.extensions?.map((e) => Extension.fromPartial(e)) || []; + message.templateDb = object.templateDb ?? ""; return message; }, }; @@ -277,6 +294,7 @@ const baseDatabaseSpec: object = { owner: "", lcCollate: "", lcCtype: "", + templateDb: "", }; export const DatabaseSpec = { @@ -301,6 +319,9 @@ export const DatabaseSpec = { for (const v of message.extensions) { Extension.encode(v!, writer.uint32(42).fork()).ldelim(); } + if (message.templateDb !== "") { + writer.uint32(50).string(message.templateDb); + } return writer; }, @@ -327,6 +348,9 @@ export const DatabaseSpec = { case 5: message.extensions.push(Extension.decode(reader, reader.uint32())); break; + case 6: + message.templateDb = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -356,6 +380,10 @@ export const DatabaseSpec = { message.extensions = (object.extensions ?? []).map((e: any) => Extension.fromJSON(e) ); + message.templateDb = + object.templateDb !== undefined && object.templateDb !== null + ? String(object.templateDb) + : ""; return message; }, @@ -372,6 +400,7 @@ export const DatabaseSpec = { } else { obj.extensions = []; } + message.templateDb !== undefined && (obj.templateDb = message.templateDb); return obj; }, @@ -385,6 +414,7 @@ export const DatabaseSpec = { message.lcCtype = object.lcCtype ?? 
""; message.extensions = object.extensions?.map((e) => Extension.fromPartial(e)) || []; + message.templateDb = object.templateDb ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts index d2162546..0f673122 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/database_service.ts @@ -45,10 +45,15 @@ export interface ListDatabasesRequest { * To get the cluster ID use a [ClusterService.List] request. */ clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListDatabasesResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ pageSize: number; /** * Page token. To get the next page of results, Set [page_token] to the [ListDatabasesResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -97,6 +102,8 @@ export interface UpdateDatabaseRequest { * To get the name of the database use a [DatabaseService.List] request. */ databaseName: string; + /** Optional. New name of the database. */ + newDatabaseName: string; /** Field mask that specifies which fields of the Database resource should be updated. */ updateMask?: FieldMask; /** @@ -560,6 +567,7 @@ const baseUpdateDatabaseRequest: object = { $type: "yandex.cloud.mdb.postgresql.v1.UpdateDatabaseRequest", clusterId: "", databaseName: "", + newDatabaseName: "", }; export const UpdateDatabaseRequest = { @@ -575,6 +583,9 @@ export const UpdateDatabaseRequest = { if (message.databaseName !== "") { writer.uint32(18).string(message.databaseName); } + if (message.newDatabaseName !== "") { + writer.uint32(42).string(message.newDatabaseName); + } if (message.updateMask !== undefined) { FieldMask.encode(message.updateMask, writer.uint32(26).fork()).ldelim(); } @@ -601,6 +612,9 @@ export const UpdateDatabaseRequest = { case 2: message.databaseName = reader.string(); break; + case 5: + message.newDatabaseName = reader.string(); + break; case 3: message.updateMask = FieldMask.decode(reader, reader.uint32()); break; @@ -625,6 +639,10 @@ export const UpdateDatabaseRequest = { object.databaseName !== undefined && object.databaseName !== null ? String(object.databaseName) : ""; + message.newDatabaseName = + object.newDatabaseName !== undefined && object.newDatabaseName !== null + ? String(object.newDatabaseName) + : ""; message.updateMask = object.updateMask !== undefined && object.updateMask !== null ? FieldMask.fromJSON(object.updateMask) @@ -640,6 +658,8 @@ export const UpdateDatabaseRequest = { message.clusterId !== undefined && (obj.clusterId = message.clusterId); message.databaseName !== undefined && (obj.databaseName = message.databaseName); + message.newDatabaseName !== undefined && + (obj.newDatabaseName = message.newDatabaseName); message.updateMask !== undefined && (obj.updateMask = message.updateMask ? FieldMask.toJSON(message.updateMask) @@ -660,6 +680,7 @@ export const UpdateDatabaseRequest = { const message = { ...baseUpdateDatabaseRequest } as UpdateDatabaseRequest; message.clusterId = object.clusterId ?? ""; message.databaseName = object.databaseName ?? ""; + message.newDatabaseName = object.newDatabaseName ?? ""; message.updateMask = object.updateMask !== undefined && object.updateMask !== null ? 
FieldMask.fromPartial(object.updateMask) diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/resource_preset_service.ts index f169d489..6d316889 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/resource_preset_service.ts @@ -37,7 +37,7 @@ export interface ListResourcePresetsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts index 18d8b670..c6385ce0 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/user_service.ts @@ -55,7 +55,7 @@ export interface ListUsersRequest { pageSize: number; /** * Page token. To get the next page of results, set `page_token` to the [ListUsersResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/redis/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/redis/v1/backup_service.ts index f05ffc64..818aa21e 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/backup_service.ts @@ -42,7 +42,7 @@ export interface ListBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts index 02dee9be..02fdfe34 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts @@ -11,6 +11,7 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Redisconfigset50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { Redisconfigset60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; import { Redisconfigset62 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_2"; +import { Redisconfigset70 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis7_0"; import { Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.redis.v1"; @@ -303,6 +304,8 @@ export interface ClusterConfig { redisConfig60?: Redisconfigset60 | undefined; /** Configuration of a Redis 6.2 server. */ redisConfig62?: Redisconfigset62 | undefined; + /** Configuration of a Redis 7.0 server. */ + redisConfig70?: Redisconfigset70 | undefined; /** Resources allocated to Redis hosts. */ resources?: Resources; /** Time to start the daily backup, in the UTC timezone. */ @@ -328,7 +331,7 @@ export interface Host { * Name of the Redis host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. + * The name is unique across all MDB hosts that exist on the platform, as it defines the FQDN of the host. 
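// The database.ts and database_service.ts hunks above add a `templateDb` field to
// Database/DatabaseSpec and a `newDatabaseName` field (wire tag 5) to UpdateDatabaseRequest.
// A minimal sketch of populating the new fields with the generated fromPartial helpers;
// the cluster ID, database names and mask path are illustrative assumptions, not values
// taken from this patch:

const spec = DatabaseSpec.fromPartial({
  name: "orders_copy",     // hypothetical database name
  owner: "app_user",       // hypothetical owner
  templateDb: "orders",    // new in this patch: name of the database template
});

const renameRequest = UpdateDatabaseRequest.fromPartial({
  clusterId: "<cluster-id>",                    // hypothetical cluster ID
  databaseName: "orders_copy",                  // current name
  newDatabaseName: "orders_archive",            // new in this patch
  updateMask: { paths: ["new_database_name"] }, // assumed proto path for the new field
});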
*/ name: string; /** ID of the Redis cluster. The ID is assigned by MDB at creation time. */ @@ -1129,6 +1132,12 @@ export const ClusterConfig = { writer.uint32(58).fork() ).ldelim(); } + if (message.redisConfig70 !== undefined) { + Redisconfigset70.encode( + message.redisConfig70, + writer.uint32(66).fork() + ).ldelim(); + } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); } @@ -1172,6 +1181,12 @@ export const ClusterConfig = { reader.uint32() ); break; + case 8: + message.redisConfig70 = Redisconfigset70.decode( + reader, + reader.uint32() + ); + break; case 3: message.resources = Resources.decode(reader, reader.uint32()); break; @@ -1207,6 +1222,10 @@ export const ClusterConfig = { object.redisConfig_6_2 !== undefined && object.redisConfig_6_2 !== null ? Redisconfigset62.fromJSON(object.redisConfig_6_2) : undefined; + message.redisConfig70 = + object.redisConfig_7_0 !== undefined && object.redisConfig_7_0 !== null + ? Redisconfigset70.fromJSON(object.redisConfig_7_0) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) @@ -1238,6 +1257,10 @@ export const ClusterConfig = { (obj.redisConfig_6_2 = message.redisConfig62 ? Redisconfigset62.toJSON(message.redisConfig62) : undefined); + message.redisConfig70 !== undefined && + (obj.redisConfig_7_0 = message.redisConfig70 + ? Redisconfigset70.toJSON(message.redisConfig70) + : undefined); message.resources !== undefined && (obj.resources = message.resources ? Resources.toJSON(message.resources) @@ -1268,6 +1291,10 @@ export const ClusterConfig = { object.redisConfig62 !== undefined && object.redisConfig62 !== null ? Redisconfigset62.fromPartial(object.redisConfig62) : undefined; + message.redisConfig70 = + object.redisConfig70 !== undefined && object.redisConfig70 !== null + ? Redisconfigset70.fromPartial(object.redisConfig70) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts index 2eb6d55e..a229918c 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts @@ -38,6 +38,7 @@ import { Backup } from "../../../../../yandex/cloud/mdb/redis/v1/backup"; import { Redisconfig50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { Redisconfig60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; import { Redisconfig62 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_2"; +import { Redisconfig70 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis7_0"; import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.redis.v1"; @@ -66,7 +67,7 @@ export interface ListClustersRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. 
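With the redisConfig70 branch added to ClusterConfig above, the effective settings of a Redis 7.0 cluster can be read through the new Redisconfigset70 message (defined in the redis7_0.ts module added further down in this patch). A small sketch, assuming a ClusterConfig value already obtained from the API and an illustrative import path:

import { ClusterConfig } from './src/generated/yandex/cloud/mdb/redis/v1/cluster';
import { redisconfig70_MaxmemoryPolicyToJSON } from './src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0';

// Sketch only: `config` comes from a Cluster returned by the API.
// redisConfig70 is undefined for clusters running Redis 5.0, 6.0 or 6.2.
function logRedis70EvictionPolicy(config: ClusterConfig): void {
  const effective = config.redisConfig70?.effectiveConfig;
  if (effective !== undefined) {
    console.log(redisconfig70_MaxmemoryPolicyToJSON(effective.maxmemoryPolicy));
  }
}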
*/ pageToken: string; /** @@ -308,6 +309,8 @@ export interface RestoreClusterRequest { tlsEnabled?: boolean; /** Persistence mode */ persistenceMode: Cluster_PersistenceMode; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; } export interface RestoreClusterRequest_LabelsEntry { @@ -448,7 +451,7 @@ export interface ListClusterLogsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -597,7 +600,7 @@ export interface ListClusterOperationsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -630,7 +633,7 @@ export interface ListClusterBackupsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -663,7 +666,7 @@ export interface ListClusterHostsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } @@ -750,7 +753,7 @@ export interface ListClusterShardsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the - * [ListClusterShardsResponse.next_page_token] returned by a previous list request. + * [ListClusterShardsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -894,6 +897,7 @@ export interface ConfigSpec { redisConfig50?: Redisconfig50 | undefined; redisConfig60?: Redisconfig60 | undefined; redisConfig62?: Redisconfig62 | undefined; + redisConfig70?: Redisconfig70 | undefined; /** Resources allocated to Redis hosts. */ resources?: Resources; /** Time to start the daily backup, in the UTC timezone. */ @@ -2838,6 +2842,7 @@ const baseRestoreClusterRequest: object = { folderId: "", securityGroupIds: "", persistenceMode: 0, + deletionProtection: false, }; export const RestoreClusterRequest = { @@ -2893,6 +2898,9 @@ export const RestoreClusterRequest = { if (message.persistenceMode !== 0) { writer.uint32(96).int32(message.persistenceMode); } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } return writer; }, @@ -2951,6 +2959,9 @@ export const RestoreClusterRequest = { case 12: message.persistenceMode = reader.int32() as any; break; + case 13: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -3009,6 +3020,11 @@ export const RestoreClusterRequest = { object.persistenceMode !== undefined && object.persistenceMode !== null ? cluster_PersistenceModeFromJSON(object.persistenceMode) : 0; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? 
Boolean(object.deletionProtection) + : false; return message; }, @@ -3049,6 +3065,8 @@ export const RestoreClusterRequest = { (obj.persistenceMode = cluster_PersistenceModeToJSON( message.persistenceMode )); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -3079,6 +3097,7 @@ export const RestoreClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? undefined; message.persistenceMode = object.persistenceMode ?? 0; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -6265,6 +6284,12 @@ export const ConfigSpec = { writer.uint32(58).fork() ).ldelim(); } + if (message.redisConfig70 !== undefined) { + Redisconfig70.encode( + message.redisConfig70, + writer.uint32(66).fork() + ).ldelim(); + } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); } @@ -6299,6 +6324,9 @@ export const ConfigSpec = { case 7: message.redisConfig62 = Redisconfig62.decode(reader, reader.uint32()); break; + case 8: + message.redisConfig70 = Redisconfig70.decode(reader, reader.uint32()); + break; case 3: message.resources = Resources.decode(reader, reader.uint32()); break; @@ -6334,6 +6362,10 @@ export const ConfigSpec = { object.redisConfig_6_2 !== undefined && object.redisConfig_6_2 !== null ? Redisconfig62.fromJSON(object.redisConfig_6_2) : undefined; + message.redisConfig70 = + object.redisConfig_7_0 !== undefined && object.redisConfig_7_0 !== null + ? Redisconfig70.fromJSON(object.redisConfig_7_0) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) @@ -6365,6 +6397,10 @@ export const ConfigSpec = { (obj.redisConfig_6_2 = message.redisConfig62 ? Redisconfig62.toJSON(message.redisConfig62) : undefined); + message.redisConfig70 !== undefined && + (obj.redisConfig_7_0 = message.redisConfig70 + ? Redisconfig70.toJSON(message.redisConfig70) + : undefined); message.resources !== undefined && (obj.resources = message.resources ? Resources.toJSON(message.resources) @@ -6395,6 +6431,10 @@ export const ConfigSpec = { object.redisConfig62 !== undefined && object.redisConfig62 !== null ? Redisconfig62.fromPartial(object.redisConfig62) : undefined; + message.redisConfig70 = + object.redisConfig70 !== undefined && object.redisConfig70 !== null + ? Redisconfig70.fromPartial(object.redisConfig70) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts new file mode 100644 index 00000000..1f4e8796 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0.ts @@ -0,0 +1,647 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Int64Value } from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.redis.v1.config"; + +/** + * Fields and structure of `RedisConfig` reflects Redis configuration file + * parameters. + */ +export interface Redisconfig70 { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig7_0"; + /** + * Redis key eviction policy for a dataset that reaches maximum memory, + * available to the host. 
Redis maxmemory setting depends on Managed + * Service for Redis [host class](/docs/managed-redis/concepts/instance-types). + * + * All policies are described in detail in [Redis documentation](https://redis.io/topics/lru-cache). + */ + maxmemoryPolicy: Redisconfig70_MaxmemoryPolicy; + /** + * Time that Redis keeps the connection open while the client is idle. + * If no new command is sent during that time, the connection is closed. + */ + timeout?: number; + /** Authentication password. */ + password: string; + /** Number of database buckets on a single redis-server process. */ + databases?: number; + /** Threshold for logging slow requests to server in microseconds (log only slower than it). */ + slowlogLogSlowerThan?: number; + /** Max slow requests number to log. */ + slowlogMaxLen?: number; + /** String setting for pub\sub functionality; subset of KEg$lshzxeAtm. */ + notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. */ + clientOutputBufferLimitPubsub?: Redisconfig70_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig70_ClientOutputBufferLimit; +} + +export enum Redisconfig70_MaxmemoryPolicy { + MAXMEMORY_POLICY_UNSPECIFIED = 0, + /** VOLATILE_LRU - Try to remove less recently used (LRU) keys with `expire set`. */ + VOLATILE_LRU = 1, + /** ALLKEYS_LRU - Remove less recently used (LRU) keys. */ + ALLKEYS_LRU = 2, + /** VOLATILE_LFU - Try to remove least frequently used (LFU) keys with `expire set`. */ + VOLATILE_LFU = 3, + /** ALLKEYS_LFU - Remove least frequently used (LFU) keys. */ + ALLKEYS_LFU = 4, + /** VOLATILE_RANDOM - Try to remove keys with `expire set` randomly. */ + VOLATILE_RANDOM = 5, + /** ALLKEYS_RANDOM - Remove keys randomly. */ + ALLKEYS_RANDOM = 6, + /** + * VOLATILE_TTL - Try to remove less recently used (LRU) keys with `expire set` + * and shorter TTL first. + */ + VOLATILE_TTL = 7, + /** + * NOEVICTION - Return errors when memory limit was reached and commands could require + * more memory to be used. 
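The new redis7_0.ts module defines the generated Redisconfig70 message and its MaxmemoryPolicy enum (the remaining enum entries and JSON helpers continue below). A hedged sketch of composing Redis 7.0 settings for a cluster spec follows; the numeric values are arbitrary examples and the import path is assumed.

import {
  Redisconfig70,
  Redisconfig70_MaxmemoryPolicy,
} from './src/generated/yandex/cloud/mdb/redis/v1/config/redis7_0';

// Sketch only: user-level Redis 7.0 settings; omitted optional fields keep their defaults.
const redisConfig70 = Redisconfig70.fromPartial({
  maxmemoryPolicy: Redisconfig70_MaxmemoryPolicy.ALLKEYS_LRU,
  timeout: 300,                // close idle client connections after 300 seconds
  databases: 16,               // database buckets per redis-server process
  slowlogLogSlowerThan: 10000, // log requests slower than 10 ms (value is in microseconds)
  slowlogMaxLen: 1000,
});
// The matching ConfigSpec.redisConfig70 field added to cluster_service.ts earlier in this patch
// carries this message in ClusterService create/update requests.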
+ */ + NOEVICTION = 8, + UNRECOGNIZED = -1, +} + +export function redisconfig70_MaxmemoryPolicyFromJSON( + object: any +): Redisconfig70_MaxmemoryPolicy { + switch (object) { + case 0: + case "MAXMEMORY_POLICY_UNSPECIFIED": + return Redisconfig70_MaxmemoryPolicy.MAXMEMORY_POLICY_UNSPECIFIED; + case 1: + case "VOLATILE_LRU": + return Redisconfig70_MaxmemoryPolicy.VOLATILE_LRU; + case 2: + case "ALLKEYS_LRU": + return Redisconfig70_MaxmemoryPolicy.ALLKEYS_LRU; + case 3: + case "VOLATILE_LFU": + return Redisconfig70_MaxmemoryPolicy.VOLATILE_LFU; + case 4: + case "ALLKEYS_LFU": + return Redisconfig70_MaxmemoryPolicy.ALLKEYS_LFU; + case 5: + case "VOLATILE_RANDOM": + return Redisconfig70_MaxmemoryPolicy.VOLATILE_RANDOM; + case 6: + case "ALLKEYS_RANDOM": + return Redisconfig70_MaxmemoryPolicy.ALLKEYS_RANDOM; + case 7: + case "VOLATILE_TTL": + return Redisconfig70_MaxmemoryPolicy.VOLATILE_TTL; + case 8: + case "NOEVICTION": + return Redisconfig70_MaxmemoryPolicy.NOEVICTION; + case -1: + case "UNRECOGNIZED": + default: + return Redisconfig70_MaxmemoryPolicy.UNRECOGNIZED; + } +} + +export function redisconfig70_MaxmemoryPolicyToJSON( + object: Redisconfig70_MaxmemoryPolicy +): string { + switch (object) { + case Redisconfig70_MaxmemoryPolicy.MAXMEMORY_POLICY_UNSPECIFIED: + return "MAXMEMORY_POLICY_UNSPECIFIED"; + case Redisconfig70_MaxmemoryPolicy.VOLATILE_LRU: + return "VOLATILE_LRU"; + case Redisconfig70_MaxmemoryPolicy.ALLKEYS_LRU: + return "ALLKEYS_LRU"; + case Redisconfig70_MaxmemoryPolicy.VOLATILE_LFU: + return "VOLATILE_LFU"; + case Redisconfig70_MaxmemoryPolicy.ALLKEYS_LFU: + return "ALLKEYS_LFU"; + case Redisconfig70_MaxmemoryPolicy.VOLATILE_RANDOM: + return "VOLATILE_RANDOM"; + case Redisconfig70_MaxmemoryPolicy.ALLKEYS_RANDOM: + return "ALLKEYS_RANDOM"; + case Redisconfig70_MaxmemoryPolicy.VOLATILE_TTL: + return "VOLATILE_TTL"; + case Redisconfig70_MaxmemoryPolicy.NOEVICTION: + return "NOEVICTION"; + default: + return "UNKNOWN"; + } +} + +export interface Redisconfig70_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig7_0.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. */ + softSeconds?: number; +} + +export interface Redisconfigset70 { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet7_0"; + /** + * Effective settings for a Redis 7.0 cluster (a combination of settings + * defined in [user_config] and [default_config]). + */ + effectiveConfig?: Redisconfig70; + /** User-defined settings for a Redis 7.0 cluster. */ + userConfig?: Redisconfig70; + /** Default configuration for a Redis 7.0 cluster. */ + defaultConfig?: Redisconfig70; +} + +const baseRedisconfig70: object = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig7_0", + maxmemoryPolicy: 0, + password: "", + notifyKeyspaceEvents: "", +}; + +export const Redisconfig70 = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig7_0" as const, + + encode( + message: Redisconfig70, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxmemoryPolicy !== 0) { + writer.uint32(8).int32(message.maxmemoryPolicy); + } + if (message.timeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.timeout! 
}, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.password !== "") { + writer.uint32(26).string(message.password); + } + if (message.databases !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.databases! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.slowlogLogSlowerThan !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowlogLogSlowerThan!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.slowlogMaxLen !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.slowlogMaxLen! }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.notifyKeyspaceEvents !== "") { + writer.uint32(58).string(message.notifyKeyspaceEvents); + } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig70_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig70_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Redisconfig70 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRedisconfig70 } as Redisconfig70; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxmemoryPolicy = reader.int32() as any; + break; + case 2: + message.timeout = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.password = reader.string(); + break; + case 4: + message.databases = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.slowlogLogSlowerThan = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.slowlogMaxLen = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.notifyKeyspaceEvents = reader.string(); + break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig70_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig70_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig70 { + const message = { ...baseRedisconfig70 } as Redisconfig70; + message.maxmemoryPolicy = + object.maxmemoryPolicy !== undefined && object.maxmemoryPolicy !== null + ? redisconfig70_MaxmemoryPolicyFromJSON(object.maxmemoryPolicy) + : 0; + message.timeout = + object.timeout !== undefined && object.timeout !== null + ? Number(object.timeout) + : undefined; + message.password = + object.password !== undefined && object.password !== null + ? String(object.password) + : ""; + message.databases = + object.databases !== undefined && object.databases !== null + ? Number(object.databases) + : undefined; + message.slowlogLogSlowerThan = + object.slowlogLogSlowerThan !== undefined && + object.slowlogLogSlowerThan !== null + ? Number(object.slowlogLogSlowerThan) + : undefined; + message.slowlogMaxLen = + object.slowlogMaxLen !== undefined && object.slowlogMaxLen !== null + ? 
Number(object.slowlogMaxLen) + : undefined; + message.notifyKeyspaceEvents = + object.notifyKeyspaceEvents !== undefined && + object.notifyKeyspaceEvents !== null + ? String(object.notifyKeyspaceEvents) + : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig70_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? Redisconfig70_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; + return message; + }, + + toJSON(message: Redisconfig70): unknown { + const obj: any = {}; + message.maxmemoryPolicy !== undefined && + (obj.maxmemoryPolicy = redisconfig70_MaxmemoryPolicyToJSON( + message.maxmemoryPolicy + )); + message.timeout !== undefined && (obj.timeout = message.timeout); + message.password !== undefined && (obj.password = message.password); + message.databases !== undefined && (obj.databases = message.databases); + message.slowlogLogSlowerThan !== undefined && + (obj.slowlogLogSlowerThan = message.slowlogLogSlowerThan); + message.slowlogMaxLen !== undefined && + (obj.slowlogMaxLen = message.slowlogMaxLen); + message.notifyKeyspaceEvents !== undefined && + (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig70_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig70_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Redisconfig70 { + const message = { ...baseRedisconfig70 } as Redisconfig70; + message.maxmemoryPolicy = object.maxmemoryPolicy ?? 0; + message.timeout = object.timeout ?? undefined; + message.password = object.password ?? ""; + message.databases = object.databases ?? undefined; + message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; + message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; + message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig70_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig70_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Redisconfig70.$type, Redisconfig70); + +const baseRedisconfig70_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig7_0.ClientOutputBufferLimit", +}; + +export const Redisconfig70_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig7_0.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig70_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig70_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRedisconfig70_ClientOutputBufferLimit, + } as Redisconfig70_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig70_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig70_ClientOutputBufferLimit, + } as Redisconfig70_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig70_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig70_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig70_ClientOutputBufferLimit, + } as Redisconfig70_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig70_ClientOutputBufferLimit.$type, + Redisconfig70_ClientOutputBufferLimit +); + +const baseRedisconfigset70: object = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet7_0", +}; + +export const Redisconfigset70 = { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet7_0" as const, + + encode( + message: Redisconfigset70, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Redisconfig70.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Redisconfig70.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Redisconfig70.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Redisconfigset70 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRedisconfigset70 } as Redisconfigset70; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Redisconfig70.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Redisconfig70.decode(reader, reader.uint32()); + break; + case 3: + message.defaultConfig = Redisconfig70.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfigset70 { + const message = { ...baseRedisconfigset70 } as Redisconfigset70; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Redisconfig70.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Redisconfig70.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Redisconfig70.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Redisconfigset70): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Redisconfig70.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Redisconfig70.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? Redisconfig70.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Redisconfigset70 { + const message = { ...baseRedisconfigset70 } as Redisconfigset70; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Redisconfig70.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Redisconfig70.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
Redisconfig70.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Redisconfigset70.$type, Redisconfigset70); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/redis/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/redis/v1/resource_preset_service.ts index d2043b1d..4678e38b 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/resource_preset_service.ts @@ -37,7 +37,7 @@ export interface ListResourcePresetsRequest { pageSize: number; /** * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. + * returned by the previous list request. */ pageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/backup.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/backup.ts index 06c4c66c..0a226e66 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/backup.ts @@ -8,6 +8,7 @@ export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1"; /** * An SQL Server backup resource. + * * For more information, see the [Backup](/docs/managed-sqlserver/concepts/backup) section in the documentation. */ export interface Backup { @@ -16,13 +17,13 @@ export interface Backup { id: string; /** ID of the folder that the backup belongs to. */ folderId: string; - /** Creation timestamp (i.e. when the backup operation was completed). */ + /** Time when the backup operation was completed. */ createdAt?: Date; /** ID of the SQL Server cluster that the backup was created for. */ sourceClusterId: string; /** Time when the backup operation was started. */ startedAt?: Date; - /** List databases included in the backup */ + /** List of databases included in the backup. */ databases: string[]; } diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/backup_service.ts index d1fc915b..f92e61db 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/backup_service.ts @@ -37,15 +37,12 @@ export interface ListBackupsRequest { */ folderId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. 
To get the next page of results, Set `page_token` to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -54,10 +51,11 @@ export interface ListBackupsResponse { /** List of SQL Server backups. */ backups: Backup[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListBackupsRequest.page_size], use the `next_page_token` as the value - * for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own `next_page_token` to continue paging through the results. + * This token allows you to get the next page of results for ListBackups requests. + * + * If the number of results is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value for the [ListBackupsRequest.page_token] parameter in the next ListBackups request. + * + * Each subsequent ListBackups request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts index b372b637..d87b7ad8 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts @@ -8,33 +8,49 @@ import { SQLServerConfigSet2016sp2std, SQLServerConfigSet2016sp2ent, } from "../../../../../yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2"; +import { + SQLServerConfigSet2017std, + SQLServerConfigSet2017ent, +} from "../../../../../yandex/cloud/mdb/sqlserver/v1/config/sqlserver2017"; +import { + SQLServerConfigSet2019std, + SQLServerConfigSet2019ent, +} from "../../../../../yandex/cloud/mdb/sqlserver/v1/config/sqlserver2019"; export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1"; /** * An SQL Server cluster. + * * For more information, see the [Concepts](/docs/managed-sqlserver/concepts) section of the documentation. */ export interface Cluster { $type: "yandex.cloud.mdb.sqlserver.v1.Cluster"; /** * ID of the SQL Server cluster. - * This ID is assigned by Managed Service for SQL Server at creation time. + * + * This ID is assigned by Managed Service for SQL Server at the moment of creation. */ id: string; /** ID of the folder the SQL Server cluster belongs to. */ folderId: string; + /** Time when SQL Server cluster was created. */ createdAt?: Date; /** * Name of the SQL Server cluster. * - * The name must be unique within the folder, comply with RFC 1035 and be 1-63 characters long. + * The name must be unique within the folder, comply with [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt) and be 1-63 characters long. */ name: string; - /** Description of the SQL Server cluster. 0-256 characters long. */ + /** + * Description of the SQL Server cluster. + * + * Must be 0-256 characters long. + */ description: string; /** * Custom labels for the SQL Server cluster as `key:value` pairs. + * * Maximum 64 per resource. */ labels: { [key: string]: string }; @@ -44,35 +60,29 @@ export interface Cluster { monitoring: Monitoring[]; /** Configuration of the SQL Server cluster. */ config?: ClusterConfig; - /** ID of the network the cluster belongs to. */ + /** ID of the network that the cluster belongs to. */ networkId: string; /** Aggregated cluster health. 
*/ health: Cluster_Health; /** Current state of the cluster. */ status: Cluster_Status; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; - /** SQL Server Collation */ + /** SQL Server Collation. */ sqlcollation: string; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account which is used for access to Object Storage. */ serviceAccountId: string; } export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, - /** - * PRODUCTION - Stable environment with a conservative update policy: - * only hotfixes are applied during regular maintenance. - */ + /** PRODUCTION - Stable environment with a conservative update policy: only hotfixes are applied during regular maintenance. */ PRODUCTION = 1, - /** - * PRESTABLE - Environment with more aggressive update policy: new versions - * are rolled out irrespective of backward compatibility. - */ + /** PRESTABLE - Environment with more aggressive update policy: new versions are rolled out irrespective of backward compatibility. */ PRESTABLE = 2, UNRECOGNIZED = -1, } @@ -111,7 +121,7 @@ export function cluster_EnvironmentToJSON(object: Cluster_Environment): string { export enum Cluster_Health { /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] of all hosts in the cluster is `UNKNOWN`). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] of all hosts in the cluster is `ALIVE`). */ + /** ALIVE - Cluster is alive and works well ([Host.health] of all hosts in the cluster is `ALIVE`). */ ALIVE = 1, /** DEAD - Cluster is inoperable ([Host.health] of all hosts in the cluster is `DEAD`). */ DEAD = 2, @@ -256,27 +266,82 @@ export interface ClusterConfig { sqlserverConfig2016sp2std?: SQLServerConfigSet2016sp2std | undefined; /** Configuration of the SQL Server 2016sp2 enterprise edition instance. */ sqlserverConfig2016sp2ent?: SQLServerConfigSet2016sp2ent | undefined; + /** Configuration of the SQL Server 2017 standard edition instance. */ + sqlserverConfig2017std?: SQLServerConfigSet2017std | undefined; + /** Configuration of the SQL Server 2017 enterprise edition instance. */ + sqlserverConfig2017ent?: SQLServerConfigSet2017ent | undefined; + /** Configuration of the SQL Server 2019 standard edition instance. */ + sqlserverConfig2019std?: SQLServerConfigSet2019std | undefined; + /** Configuration of the SQL Server 2019 enterprise edition instance. */ + sqlserverConfig2019ent?: SQLServerConfigSet2019ent | undefined; /** Resources allocated to SQL Server hosts. */ resources?: Resources; - /** Start time for the daily backup in UTC timezone */ + /** Start time for the daily backup in UTC timezone. */ backupWindowStart?: TimeOfDay; - /** Access policy to DB */ + /** Database access policy. 
*/ access?: Access; + /** Secondary replicas connection mode */ + secondaryConnections: ClusterConfig_SecondaryConnections; +} + +export enum ClusterConfig_SecondaryConnections { + SECONDARY_CONNECTIONS_UNSPECIFIED = 0, + /** SECONDARY_CONNECTIONS_OFF - Connections to secondary replicas are prohibited */ + SECONDARY_CONNECTIONS_OFF = 1, + /** SECONDARY_CONNECTIONS_READ_ONLY - Secondary replicas are read-only */ + SECONDARY_CONNECTIONS_READ_ONLY = 2, + UNRECOGNIZED = -1, +} + +export function clusterConfig_SecondaryConnectionsFromJSON( + object: any +): ClusterConfig_SecondaryConnections { + switch (object) { + case 0: + case "SECONDARY_CONNECTIONS_UNSPECIFIED": + return ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_UNSPECIFIED; + case 1: + case "SECONDARY_CONNECTIONS_OFF": + return ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_OFF; + case 2: + case "SECONDARY_CONNECTIONS_READ_ONLY": + return ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_READ_ONLY; + case -1: + case "UNRECOGNIZED": + default: + return ClusterConfig_SecondaryConnections.UNRECOGNIZED; + } +} + +export function clusterConfig_SecondaryConnectionsToJSON( + object: ClusterConfig_SecondaryConnections +): string { + switch (object) { + case ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_UNSPECIFIED: + return "SECONDARY_CONNECTIONS_UNSPECIFIED"; + case ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_OFF: + return "SECONDARY_CONNECTIONS_OFF"; + case ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_READ_ONLY: + return "SECONDARY_CONNECTIONS_READ_ONLY"; + default: + return "UNKNOWN"; + } } export interface Host { $type: "yandex.cloud.mdb.sqlserver.v1.Host"; /** - * Name of the SQL Server host. The host name is assigned by Managed Service for SQL Server - * at creation time, and cannot be changed. 1-63 characters long. + * Name of the SQL Server host. + * + * The host name is assigned by Managed Service for SQL Server at the moment of creation and cannot be changed. 1-63 characters long. * - * The name is unique across all existing database hosts in Yandex Cloud, - * as it defines the FQDN of the host. + * The name is unique across all database hosts that exist on the platform as it defines the FQDN of the host. */ name: string; /** - * ID of the SQL Server host. The ID is assigned by Managed Service for SQL Server - * at creation time. + * ID of the SQL Server host. + * + * The ID is assigned by Managed Service for SQL Server at the moment of creation. */ clusterId: string; /** ID of the availability zone where the SQL Server host resides. */ @@ -341,9 +406,9 @@ export enum Host_Health { HEALTH_UNKNOWN = 0, /** ALIVE - The host is performing all its functions normally. */ ALIVE = 1, - /** DEAD - The host is inoperable, and cannot perform any of its essential functions. */ + /** DEAD - The host is inoperable and cannot perform any of its essential functions. */ DEAD = 2, - /** DEGRADED - The host is degraded, and can perform only some of its essential functions. */ + /** DEGRADED - The host is degraded and can perform only some of its essential functions. */ DEGRADED = 3, UNRECOGNIZED = -1, } @@ -394,7 +459,7 @@ export interface Service { export enum Service_Type { TYPE_UNSPECIFIED = 0, - /** SQLSERVER - SQL Server service */ + /** SQLSERVER - SQL Server service. 
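The SQL Server ClusterConfig above gains per-edition config sets for 2017 and 2019 plus a secondaryConnections mode. A small sketch of inspecting those new fields, assuming a Cluster message already fetched from the API and an illustrative import path:

import {
  Cluster,
  ClusterConfig_SecondaryConnections,
  clusterConfig_SecondaryConnectionsToJSON,
} from './src/generated/yandex/cloud/mdb/sqlserver/v1/cluster';

// Sketch only: report the SQL Server version and the secondary replica connection mode.
function describeSqlServerConfig(cluster: Cluster): void {
  const config = cluster.config;
  if (config === undefined) {
    return;
  }
  if (config.sqlserverConfig2019std || config.sqlserverConfig2019ent) {
    console.log('SQL Server 2019 cluster');
  } else if (config.sqlserverConfig2017std || config.sqlserverConfig2017ent) {
    console.log('SQL Server 2017 cluster');
  }
  const readOnlyReplicas =
    config.secondaryConnections ===
    ClusterConfig_SecondaryConnections.SECONDARY_CONNECTIONS_READ_ONLY;
  console.log(
    `secondary connections: ${clusterConfig_SecondaryConnectionsToJSON(config.secondaryConnections)}`,
    `read-only replicas allowed: ${readOnlyReplicas}`
  );
}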
*/ SQLSERVER = 1, UNRECOGNIZED = -1, } @@ -469,7 +534,8 @@ export function service_HealthToJSON(object: Service_Health): string { export interface Resources { $type: "yandex.cloud.mdb.sqlserver.v1.Resources"; /** - * ID of the preset for computational resources available to a host (CPU, memory etc.). + * ID of the preset for computational resources available to a host (CPU, memory, etc.). + * * All available presets are listed in the [documentation](/docs/managed-sqlserver/concepts/instance-types). */ resourcePresetId: string; @@ -479,18 +545,18 @@ export interface Resources { * Type of the storage environment for the host. * * Possible values: - * * network-hdd - network HDD drive, - * * network-ssd - network SSD drive, - * * local-ssd - local SSD storage. + * * `network-hdd` - network HDD drive; + * * `network-ssd` - network SSD drive; + * * `local-ssd` - local SSD storage. */ diskTypeId: string; } export interface Access { $type: "yandex.cloud.mdb.sqlserver.v1.Access"; - /** Allow access for DataLens */ + /** Allows access for DataLens. */ dataLens: boolean; - /** Allow access for Web SQL. */ + /** Allows access for Web SQL. */ webSql: boolean; } @@ -981,6 +1047,7 @@ messageTypeRegistry.set(Monitoring.$type, Monitoring); const baseClusterConfig: object = { $type: "yandex.cloud.mdb.sqlserver.v1.ClusterConfig", version: "", + secondaryConnections: 0, }; export const ClusterConfig = { @@ -1005,6 +1072,30 @@ export const ClusterConfig = { writer.uint32(42).fork() ).ldelim(); } + if (message.sqlserverConfig2017std !== undefined) { + SQLServerConfigSet2017std.encode( + message.sqlserverConfig2017std, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.sqlserverConfig2017ent !== undefined) { + SQLServerConfigSet2017ent.encode( + message.sqlserverConfig2017ent, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.sqlserverConfig2019std !== undefined) { + SQLServerConfigSet2019std.encode( + message.sqlserverConfig2019std, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.sqlserverConfig2019ent !== undefined) { + SQLServerConfigSet2019ent.encode( + message.sqlserverConfig2019ent, + writer.uint32(90).fork() + ).ldelim(); + } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); } @@ -1017,6 +1108,9 @@ export const ClusterConfig = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(50).fork()).ldelim(); } + if (message.secondaryConnections !== 0) { + writer.uint32(56).int32(message.secondaryConnections); + } return writer; }, @@ -1038,6 +1132,30 @@ export const ClusterConfig = { message.sqlserverConfig2016sp2ent = SQLServerConfigSet2016sp2ent.decode(reader, reader.uint32()); break; + case 8: + message.sqlserverConfig2017std = SQLServerConfigSet2017std.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.sqlserverConfig2017ent = SQLServerConfigSet2017ent.decode( + reader, + reader.uint32() + ); + break; + case 10: + message.sqlserverConfig2019std = SQLServerConfigSet2019std.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.sqlserverConfig2019ent = SQLServerConfigSet2019ent.decode( + reader, + reader.uint32() + ); + break; case 3: message.resources = Resources.decode(reader, reader.uint32()); break; @@ -1047,6 +1165,9 @@ export const ClusterConfig = { case 6: message.access = Access.decode(reader, reader.uint32()); break; + case 7: + message.secondaryConnections = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -1075,6 +1196,26 
@@ export const ClusterConfig = { object.sqlserverConfig_2016sp2ent ) : undefined; + message.sqlserverConfig2017std = + object.sqlserverConfig_2017std !== undefined && + object.sqlserverConfig_2017std !== null + ? SQLServerConfigSet2017std.fromJSON(object.sqlserverConfig_2017std) + : undefined; + message.sqlserverConfig2017ent = + object.sqlserverConfig_2017ent !== undefined && + object.sqlserverConfig_2017ent !== null + ? SQLServerConfigSet2017ent.fromJSON(object.sqlserverConfig_2017ent) + : undefined; + message.sqlserverConfig2019std = + object.sqlserverConfig_2019std !== undefined && + object.sqlserverConfig_2019std !== null + ? SQLServerConfigSet2019std.fromJSON(object.sqlserverConfig_2019std) + : undefined; + message.sqlserverConfig2019ent = + object.sqlserverConfig_2019ent !== undefined && + object.sqlserverConfig_2019ent !== null + ? SQLServerConfigSet2019ent.fromJSON(object.sqlserverConfig_2019ent) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) @@ -1088,6 +1229,13 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.secondaryConnections = + object.secondaryConnections !== undefined && + object.secondaryConnections !== null + ? clusterConfig_SecondaryConnectionsFromJSON( + object.secondaryConnections + ) + : 0; return message; }, @@ -1102,6 +1250,22 @@ export const ClusterConfig = { (obj.sqlserverConfig_2016sp2ent = message.sqlserverConfig2016sp2ent ? SQLServerConfigSet2016sp2ent.toJSON(message.sqlserverConfig2016sp2ent) : undefined); + message.sqlserverConfig2017std !== undefined && + (obj.sqlserverConfig_2017std = message.sqlserverConfig2017std + ? SQLServerConfigSet2017std.toJSON(message.sqlserverConfig2017std) + : undefined); + message.sqlserverConfig2017ent !== undefined && + (obj.sqlserverConfig_2017ent = message.sqlserverConfig2017ent + ? SQLServerConfigSet2017ent.toJSON(message.sqlserverConfig2017ent) + : undefined); + message.sqlserverConfig2019std !== undefined && + (obj.sqlserverConfig_2019std = message.sqlserverConfig2019std + ? SQLServerConfigSet2019std.toJSON(message.sqlserverConfig2019std) + : undefined); + message.sqlserverConfig2019ent !== undefined && + (obj.sqlserverConfig_2019ent = message.sqlserverConfig2019ent + ? SQLServerConfigSet2019ent.toJSON(message.sqlserverConfig2019ent) + : undefined); message.resources !== undefined && (obj.resources = message.resources ? Resources.toJSON(message.resources) @@ -1112,6 +1276,10 @@ export const ClusterConfig = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.secondaryConnections !== undefined && + (obj.secondaryConnections = clusterConfig_SecondaryConnectionsToJSON( + message.secondaryConnections + )); return obj; }, @@ -1134,6 +1302,26 @@ export const ClusterConfig = { object.sqlserverConfig2016sp2ent ) : undefined; + message.sqlserverConfig2017std = + object.sqlserverConfig2017std !== undefined && + object.sqlserverConfig2017std !== null + ? SQLServerConfigSet2017std.fromPartial(object.sqlserverConfig2017std) + : undefined; + message.sqlserverConfig2017ent = + object.sqlserverConfig2017ent !== undefined && + object.sqlserverConfig2017ent !== null + ? SQLServerConfigSet2017ent.fromPartial(object.sqlserverConfig2017ent) + : undefined; + message.sqlserverConfig2019std = + object.sqlserverConfig2019std !== undefined && + object.sqlserverConfig2019std !== null + ? 
SQLServerConfigSet2019std.fromPartial(object.sqlserverConfig2019std) + : undefined; + message.sqlserverConfig2019ent = + object.sqlserverConfig2019ent !== undefined && + object.sqlserverConfig2019ent !== null + ? SQLServerConfigSet2019ent.fromPartial(object.sqlserverConfig2019ent) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) @@ -1147,6 +1335,7 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.secondaryConnections = object.secondaryConnections ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts index 9b5decc0..84ecf0b9 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts @@ -18,10 +18,13 @@ import { Cluster_Environment, Resources, Access, + ClusterConfig_SecondaryConnections, Cluster, Host, cluster_EnvironmentFromJSON, cluster_EnvironmentToJSON, + clusterConfig_SecondaryConnectionsFromJSON, + clusterConfig_SecondaryConnectionsToJSON, } from "../../../../../yandex/cloud/mdb/sqlserver/v1/cluster"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { TimeOfDay } from "../../../../../google/type/timeofday"; @@ -34,6 +37,14 @@ import { SQLServerConfig2016sp2std, SQLServerConfig2016sp2ent, } from "../../../../../yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2"; +import { + SQLServerConfig2017std, + SQLServerConfig2017ent, +} from "../../../../../yandex/cloud/mdb/sqlserver/v1/config/sqlserver2017"; +import { + SQLServerConfig2019std, + SQLServerConfig2019ent, +} from "../../../../../yandex/cloud/mdb/sqlserver/v1/config/sqlserver2019"; export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1"; @@ -56,24 +67,23 @@ export interface ListClustersRequest { */ folderId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListClustersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the [ListClustersResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by the previous list request. */ pageToken: string; /** * A filter expression that filters resources listed in the response. + * * The expression must specify: - * 1. The field name to filter by. Currently you can only use filtering with the [Cluster.name] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-zA-Z0-9_-]+`. * - * Example of a filter: `name NOT IN 'test,beta'`. + * 1. A field name to filter by. Currently you can only use filtering with the [Cluster.name] field. + * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * 3. 
A value. Must be 1-63 characters long and match the regular expression `[a-zA-Z0-9_-]+`. + * + * Example of a filter expression: `name NOT IN 'test,beta'`. */ filter: string; } @@ -83,10 +93,9 @@ export interface ListClustersResponse { /** List of SQL Server clusters. */ clusters: Cluster[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClustersRequest.page_size], use the `next_page_token` as the value - * for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own `next_page_token` to continue paging through the results. + * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClustersRequest.page_size], use the `next_page_token` as the value for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent list request has its own `next_page_token` to continue paging through the results. */ nextPageToken: string; } @@ -105,12 +114,12 @@ export interface CreateClusterRequest { description: string; /** * Custom labels for the SQL Server cluster as `key:value` pairs. - * For example, "project": "mvp" or "source": "dictionary". + * For example, "project":"mvp" or "source":"dictionary". */ labels: { [key: string]: string }; /** Deployment environment of the SQL Server cluster. */ environment: Cluster_Environment; - /** SQL Server and hosts configuration for the cluster. */ + /** Configurations of SQL Server and hosts of the cluster. */ configSpec?: ConfigSpec; /** One or more configurations of databases to be created in the SQL Server cluster. */ databaseSpecs: DatabaseSpec[]; @@ -120,15 +129,15 @@ export interface CreateClusterRequest { hostSpecs: HostSpec[]; /** ID of the network to create the SQL Server cluster in. */ networkId: string; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; - /** name of SQL Collation that cluster will be created with */ + /** Name of SQL Collation that cluster will be created with. */ sqlcollation: string; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; } @@ -157,11 +166,12 @@ export interface UpdateClusterRequest { /** New description of the SQL Server cluster. */ description: string; /** - * Custom labels for the SQL Server cluster as `key:value` pairs. Maximum 64 per resource. + * Custom labels for the SQL Server cluster as `key:value` pairs. + * + * For example, `"project":"mvp"` or `"source":"dictionary"`. * - * For example, "project": "mvp" or "source": "dictionary". + * The new set of labels completely replaces the old one. * - * The new set of labels will completely replace the old ones. * To add a label, request the current set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. */ labels: { [key: string]: string }; @@ -169,11 +179,11 @@ export interface UpdateClusterRequest { configSpec?: ConfigSpec; /** New name for the SQL Server cluster. */ name: string; - /** User security groups */ + /** User security groups. 
*/ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; } @@ -236,8 +246,9 @@ export interface RestoreClusterRequest { /** Description of the new SQL Server cluster to be created from the backup. */ description: string; /** - * Custom labels for the new SQL Server cluster to be created from the backup as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * Custom labels for the new SQL Server cluster to be created from the backup as `key:value` pairs. + * + * For example, `"project":"mvp"` or `"source":"dictionary"`. */ labels: { [key: string]: string }; /** Deployment environment of the new SQL Server cluster to be created from the backup. */ @@ -254,13 +265,13 @@ export interface RestoreClusterRequest { * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Determines whether the cluster is protected from being deleted. */ deletionProtection: boolean; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; - /** ID of the service account used for access to Yandex Object Storage. */ + /** ID of the service account used for access to Object Storage. */ serviceAccountId: string; } @@ -280,7 +291,7 @@ export interface RestoreClusterMetadata { export interface StartClusterFailoverRequest { $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverRequest"; - /** ID of sqlserver cluster. */ + /** ID of SQL Server cluster. */ clusterId: string; /** * Host name to switch master role to. @@ -292,7 +303,7 @@ export interface StartClusterFailoverRequest { export interface StartClusterFailoverMetadata { $type: "yandex.cloud.mdb.sqlserver.v1.StartClusterFailoverMetadata"; - /** ID of the sqlserver cluster being failovered. */ + /** ID of the SQL Server cluster being failovered. */ clusterId: string; } @@ -326,32 +337,30 @@ export interface ListClusterLogsRequest { columnFilter: string[]; /** Type of the service to request logs about. */ serviceType: ListClusterLogsRequest_ServiceType; - /** Start timestamp for the logs request. */ + /** Specifies a moment that the logs are requested from. */ fromTime?: Date; - /** End timestamp for the logs request. */ + /** Specifies a moment that the logs are requested till. */ toTime?: Date; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListClusterLogsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. - */ + /** Page token. 
To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by the previous list request. */ pageToken: string; - /** Always return `next_page_token`, even if current page is empty. */ + /** The service returns [next_page_token] even if the current page is empty. */ alwaysNextPageToken: boolean; /** * A filter expression that filters resources listed in the response. * * The expression must specify: - * 1. The field name to filter by. Currently filtering can be applied to the [LogRecord.logs.message.hostname] field. - * 2. An `=` operator. - * 3. The value in double quotes (`"`). Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`. * - * Examples of a filter: `message.hostname='node1.db.cloud.yandex.net'` + * 1. A field name to filter by. Currently filtering can be applied to the [LogRecord.logs.message.hostname] field only. + * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * 3. A value. Must be 1-63 characters long and match the regular expression `[a-z0-9.-]{1,61}`. + * + * Example of a filter: `message.hostname='node1.db.cloud.yandex.net'`. */ filter: string; } @@ -405,10 +414,11 @@ export interface ListClusterLogsResponse { /** Requested log records. */ logs: LogRecord[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterLogsRequest.page_size], use the `next_page_token` as the value - * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own `next_page_token` to continue paging through the results. + * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -422,15 +432,12 @@ export interface ListClusterOperationsRequest { */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListClusterOperationsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -439,10 +446,11 @@ export interface ListClusterOperationsResponse { /** List of operations for the specified SQL Server cluster. */ operations: Operation[]; /** - * Token that allows you to get the next page of results for list requests. 
If the number of results - * is larger than [ListClusterOperationsRequest.page_size], use the `next_page_token` as the value - * for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own `next_page_token` to continue paging through the results. + * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -456,15 +464,12 @@ export interface ListClusterBackupsRequest { */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListClusterBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -473,10 +478,11 @@ export interface ListClusterBackupsResponse { /** List of SQL Server backups. */ backups: Backup[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterBackupsRequest.page_size], use the `next_page_token` as the value - * for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own `next_page_token` to continue paging through the results. + * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -490,15 +496,12 @@ export interface ListClusterHostsRequest { */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListClusterHostsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. 
- */ + /** Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -507,10 +510,11 @@ export interface ListClusterHostsResponse { /** List of SQL Server hosts. */ hosts: Host[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterHostsRequest.page_size], use the `next_page_token` as the value - * for the [ListClusterHostsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own `next_page_token` to continue paging through the results. + * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value for the [ListClusterHostsRequest.page_token] query parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -590,43 +594,82 @@ export interface HostSpec { */ zoneId: string; /** - * ID of the subnet that the host should belong to. This subnet should be a part - * of the network that the cluster belongs to. + * ID of the subnet that the host should belong to. This subnet should be a part of the network that the cluster belongs to. + * * The ID of the network is set in the field [Cluster.network_id]. */ subnetId: string; /** - * Whether the host should get a public IP address on creation. + * Determines whether the host gets a public IP address on creation. * - * After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign - * a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + * After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. * * Possible values: - * * false - don't assign a public IP to the host. - * * true - the host should have a public IP address. + * * `false` - do not assign a public IP to the host; + * * `true` - assign a public IP to the host. + */ + assignPublicIp: boolean; +} + +export interface UpdateHostSpec { + $type: "yandex.cloud.mdb.sqlserver.v1.UpdateHostSpec"; + /** + * Name of the host to update. + * + * To get the SQL Server host name, use a [ClusterService.ListHosts] request. */ + hostName: string; + /** Field mask that specifies which fields of the SQL Server host should be updated. */ + updateMask?: FieldMask; + /** Determines whether the host gets a public IP address on creation. */ assignPublicIp: boolean; } +export interface UpdateClusterHostsRequest { + $type: "yandex.cloud.mdb.sqlserver.v1.UpdateClusterHostsRequest"; + /** + * ID of the SQL Server cluster to update hosts in. + * To get the SQL Server cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** New configurations to apply to hosts. */ + updateHostSpecs: UpdateHostSpec[]; +} + export interface ConfigSpec { $type: "yandex.cloud.mdb.sqlserver.v1.ConfigSpec"; /** * Version of SQL Server used in the cluster. * * Possible values: - * * 2016sp2 + * * 2016sp2std, + * * 2016sp2ent, + * * 2017std, + * * 2017ent, + * * 2019std, + * * 2019ent. */ version: string; /** Configuration for an SQL Server 2016 SP2 Standard edition cluster. 
*/ sqlserverConfig2016sp2std?: SQLServerConfig2016sp2std | undefined; /** Configuration for an SQL Server 2016 SP2 Enterprise edition cluster. */ sqlserverConfig2016sp2ent?: SQLServerConfig2016sp2ent | undefined; + /** Configuration for an SQL Server 2017 Standard edition cluster. */ + sqlserverConfig2017std?: SQLServerConfig2017std | undefined; + /** Configuration for an SQL Server 2017 Enterprise edition cluster. */ + sqlserverConfig2017ent?: SQLServerConfig2017ent | undefined; + /** Configuration for an SQL Server 2019 Standard edition cluster. */ + sqlserverConfig2019std?: SQLServerConfig2019std | undefined; + /** Configuration for an SQL Server 2019 Enterprise edition cluster. */ + sqlserverConfig2019ent?: SQLServerConfig2019ent | undefined; /** Resources allocated to SQL Server hosts. */ resources?: Resources; - /** Start time for the daily backup in UTC timezone */ + /** Start time for the daily backup in UTC timezone. */ backupWindowStart?: TimeOfDay; - /** Access policy to DB */ + /** Database access policy. */ access?: Access; + /** Secondary replicas connection mode */ + secondaryConnections: ClusterConfig_SecondaryConnections; } const baseGetClusterRequest: object = { @@ -4209,9 +4252,199 @@ export const HostSpec = { messageTypeRegistry.set(HostSpec.$type, HostSpec); +const baseUpdateHostSpec: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.UpdateHostSpec", + hostName: "", + assignPublicIp: false, +}; + +export const UpdateHostSpec = { + $type: "yandex.cloud.mdb.sqlserver.v1.UpdateHostSpec" as const, + + encode( + message: UpdateHostSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hostName !== "") { + writer.uint32(10).string(message.hostName); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(24).bool(message.assignPublicIp); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateHostSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hostName = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.assignPublicIp = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = + object.hostName !== undefined && object.hostName !== null + ? String(object.hostName) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + return message; + }, + + toJSON(message: UpdateHostSpec): unknown { + const obj: any = {}; + message.hostName !== undefined && (obj.hostName = message.hostName); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? 
FieldMask.toJSON(message.updateMask) + : undefined); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = object.hostName ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.assignPublicIp = object.assignPublicIp ?? false; + return message; + }, +}; + +messageTypeRegistry.set(UpdateHostSpec.$type, UpdateHostSpec); + +const baseUpdateClusterHostsRequest: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.UpdateClusterHostsRequest", + clusterId: "", +}; + +export const UpdateClusterHostsRequest = { + $type: "yandex.cloud.mdb.sqlserver.v1.UpdateClusterHostsRequest" as const, + + encode( + message: UpdateClusterHostsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.updateHostSpecs) { + UpdateHostSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.updateHostSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.updateHostSpecs.push( + UpdateHostSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.updateHostSpecs = (object.updateHostSpecs ?? []).map((e: any) => + UpdateHostSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateClusterHostsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.updateHostSpecs) { + obj.updateHostSpecs = message.updateHostSpecs.map((e) => + e ? UpdateHostSpec.toJSON(e) : undefined + ); + } else { + obj.updateHostSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = object.clusterId ?? 
""; + message.updateHostSpecs = + object.updateHostSpecs?.map((e) => UpdateHostSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsRequest.$type, + UpdateClusterHostsRequest +); + const baseConfigSpec: object = { $type: "yandex.cloud.mdb.sqlserver.v1.ConfigSpec", version: "", + secondaryConnections: 0, }; export const ConfigSpec = { @@ -4236,6 +4469,30 @@ export const ConfigSpec = { writer.uint32(42).fork() ).ldelim(); } + if (message.sqlserverConfig2017std !== undefined) { + SQLServerConfig2017std.encode( + message.sqlserverConfig2017std, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.sqlserverConfig2017ent !== undefined) { + SQLServerConfig2017ent.encode( + message.sqlserverConfig2017ent, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.sqlserverConfig2019std !== undefined) { + SQLServerConfig2019std.encode( + message.sqlserverConfig2019std, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.sqlserverConfig2019ent !== undefined) { + SQLServerConfig2019ent.encode( + message.sqlserverConfig2019ent, + writer.uint32(90).fork() + ).ldelim(); + } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); } @@ -4248,6 +4505,9 @@ export const ConfigSpec = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(50).fork()).ldelim(); } + if (message.secondaryConnections !== 0) { + writer.uint32(56).int32(message.secondaryConnections); + } return writer; }, @@ -4273,6 +4533,30 @@ export const ConfigSpec = { reader.uint32() ); break; + case 8: + message.sqlserverConfig2017std = SQLServerConfig2017std.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.sqlserverConfig2017ent = SQLServerConfig2017ent.decode( + reader, + reader.uint32() + ); + break; + case 10: + message.sqlserverConfig2019std = SQLServerConfig2019std.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.sqlserverConfig2019ent = SQLServerConfig2019ent.decode( + reader, + reader.uint32() + ); + break; case 3: message.resources = Resources.decode(reader, reader.uint32()); break; @@ -4282,6 +4566,9 @@ export const ConfigSpec = { case 6: message.access = Access.decode(reader, reader.uint32()); break; + case 7: + message.secondaryConnections = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -4306,6 +4593,26 @@ export const ConfigSpec = { object.sqlserverConfig_2016sp2ent !== null ? SQLServerConfig2016sp2ent.fromJSON(object.sqlserverConfig_2016sp2ent) : undefined; + message.sqlserverConfig2017std = + object.sqlserverConfig_2017std !== undefined && + object.sqlserverConfig_2017std !== null + ? SQLServerConfig2017std.fromJSON(object.sqlserverConfig_2017std) + : undefined; + message.sqlserverConfig2017ent = + object.sqlserverConfig_2017ent !== undefined && + object.sqlserverConfig_2017ent !== null + ? SQLServerConfig2017ent.fromJSON(object.sqlserverConfig_2017ent) + : undefined; + message.sqlserverConfig2019std = + object.sqlserverConfig_2019std !== undefined && + object.sqlserverConfig_2019std !== null + ? SQLServerConfig2019std.fromJSON(object.sqlserverConfig_2019std) + : undefined; + message.sqlserverConfig2019ent = + object.sqlserverConfig_2019ent !== undefined && + object.sqlserverConfig_2019ent !== null + ? SQLServerConfig2019ent.fromJSON(object.sqlserverConfig_2019ent) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? 
Resources.fromJSON(object.resources) @@ -4319,6 +4626,13 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.secondaryConnections = + object.secondaryConnections !== undefined && + object.secondaryConnections !== null + ? clusterConfig_SecondaryConnectionsFromJSON( + object.secondaryConnections + ) + : 0; return message; }, @@ -4333,6 +4647,22 @@ export const ConfigSpec = { (obj.sqlserverConfig_2016sp2ent = message.sqlserverConfig2016sp2ent ? SQLServerConfig2016sp2ent.toJSON(message.sqlserverConfig2016sp2ent) : undefined); + message.sqlserverConfig2017std !== undefined && + (obj.sqlserverConfig_2017std = message.sqlserverConfig2017std + ? SQLServerConfig2017std.toJSON(message.sqlserverConfig2017std) + : undefined); + message.sqlserverConfig2017ent !== undefined && + (obj.sqlserverConfig_2017ent = message.sqlserverConfig2017ent + ? SQLServerConfig2017ent.toJSON(message.sqlserverConfig2017ent) + : undefined); + message.sqlserverConfig2019std !== undefined && + (obj.sqlserverConfig_2019std = message.sqlserverConfig2019std + ? SQLServerConfig2019std.toJSON(message.sqlserverConfig2019std) + : undefined); + message.sqlserverConfig2019ent !== undefined && + (obj.sqlserverConfig_2019ent = message.sqlserverConfig2019ent + ? SQLServerConfig2019ent.toJSON(message.sqlserverConfig2019ent) + : undefined); message.resources !== undefined && (obj.resources = message.resources ? Resources.toJSON(message.resources) @@ -4343,6 +4673,10 @@ export const ConfigSpec = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.secondaryConnections !== undefined && + (obj.secondaryConnections = clusterConfig_SecondaryConnectionsToJSON( + message.secondaryConnections + )); return obj; }, @@ -4365,6 +4699,26 @@ export const ConfigSpec = { object.sqlserverConfig2016sp2ent ) : undefined; + message.sqlserverConfig2017std = + object.sqlserverConfig2017std !== undefined && + object.sqlserverConfig2017std !== null + ? SQLServerConfig2017std.fromPartial(object.sqlserverConfig2017std) + : undefined; + message.sqlserverConfig2017ent = + object.sqlserverConfig2017ent !== undefined && + object.sqlserverConfig2017ent !== null + ? SQLServerConfig2017ent.fromPartial(object.sqlserverConfig2017ent) + : undefined; + message.sqlserverConfig2019std = + object.sqlserverConfig2019std !== undefined && + object.sqlserverConfig2019std !== null + ? SQLServerConfig2019std.fromPartial(object.sqlserverConfig2019std) + : undefined; + message.sqlserverConfig2019ent = + object.sqlserverConfig2019ent !== undefined && + object.sqlserverConfig2019ent !== null + ? SQLServerConfig2019ent.fromPartial(object.sqlserverConfig2019ent) + : undefined; message.resources = object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) @@ -4378,6 +4732,7 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.secondaryConnections = object.secondaryConnections ?? 0; return message; }, }; @@ -4568,7 +4923,7 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterBackupsResponse.decode(value), }, - /** Retrieves a list of hosts for the specified SQL Server cluster. */ + /** Retrieves the list of hosts for the specified SQL Server cluster. 
*/ listHosts: { path: "/yandex.cloud.mdb.sqlserver.v1.ClusterService/ListHosts", requestStream: false, @@ -4582,6 +4937,19 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterHostsResponse.decode(value), }, + /** Updates the specified hosts. */ + updateHosts: { + path: "/yandex.cloud.mdb.sqlserver.v1.ClusterService/UpdateHosts", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterHostsRequest) => + Buffer.from(UpdateClusterHostsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateClusterHostsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface ClusterServiceServer extends UntypedServiceImplementation { @@ -4627,8 +4995,10 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { ListClusterBackupsRequest, ListClusterBackupsResponse >; - /** Retrieves a list of hosts for the specified SQL Server cluster. */ + /** Retrieves the list of hosts for the specified SQL Server cluster. */ listHosts: handleUnaryCall; + /** Updates the specified hosts. */ + updateHosts: handleUnaryCall; } export interface ClusterServiceClient extends Client { @@ -4900,7 +5270,7 @@ export interface ClusterServiceClient extends Client { response: ListClusterBackupsResponse ) => void ): ClientUnaryCall; - /** Retrieves a list of hosts for the specified SQL Server cluster. */ + /** Retrieves the list of hosts for the specified SQL Server cluster. */ listHosts( request: ListClusterHostsRequest, callback: ( @@ -4925,6 +5295,22 @@ export interface ClusterServiceClient extends Client { response: ListClusterHostsResponse ) => void ): ClientUnaryCall; + /** Updates the specified hosts. */ + updateHosts( + request: UpdateClusterHostsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const ClusterServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2.ts index 81130229..857e482d 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2.ts @@ -34,10 +34,11 @@ export interface SQLServerConfig2016sp2std { costThresholdForParallelism?: number; /** * Describes how to configure login auditing to monitor SQL Server Database Engine login activity. + * * Possible values: - * * 0 - do not log login attempts, - * * 1 - log only failed login attempts, - * * 2 - log only successful login attempts (not recommended), + * * 0 - do not log login attempts; + * * 1 - log only failed login attempts; + * * 2 - log only successful login attempts (not recommended); * * 3 - log all login attempts (not recommended). * * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/ssms/configure-login-auditing-sql-server-management-studio?view=sql-server-2016). 
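
The login auditing levels listed in the hunk above, together with the fill factor and ad-hoc workload options covered in the next hunk, are optional Int64Value/BoolValue wrapper fields, so client code can set only the options it cares about through the generated fromPartial helper. A minimal usage sketch follows, assuming the module is imported from the repo-relative path shown in this diff and using illustrative values only:

import { SQLServerConfig2016sp2std } from "./src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2016sp2";

// fromPartial fills in $type and leaves omitted optional wrapper fields undefined,
// so only the settings being changed need to be listed.
const sqlserverConfig = SQLServerConfig2016sp2std.fromPartial({
  auditLevel: 1,                   // log only failed login attempts
  fillFactorPercent: 90,           // keep 10% of each index leaf-level page free
  optimizeForAdHocWorkloads: true, // cache plans only after second execution
});
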
@@ -45,7 +46,8 @@ export interface SQLServerConfig2016sp2std { auditLevel?: number; /** * Manages the fill factor server configuration option. - * When an index is created or rebuilt the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. + * + * When an index is created or rebuilt, the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. * * Values 0 and 100 mean full page usage (no space reserved). * @@ -54,6 +56,7 @@ export interface SQLServerConfig2016sp2std { fillFactorPercent?: number; /** * Determines whether plans should be cached only after second execution. + * * Allows to avoid SQL cache bloat because of single-use plans. * * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option?view=sql-server-2016). @@ -96,10 +99,11 @@ export interface SQLServerConfig2016sp2ent { costThresholdForParallelism?: number; /** * Describes how to configure login auditing to monitor SQL Server Database Engine login activity. + * * Possible values: - * * 0 - do not log login attempts, - * * 1 - log only failed login attempts, - * * 2 - log only successful login attempts (not recommended), + * * 0 - do not log login attempts; + * * 1 - log only failed login attempts; + * * 2 - log only successful login attempts (not recommended); * * 3 - log all login attempts (not recommended). * * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/ssms/configure-login-auditing-sql-server-management-studio?view=sql-server-2016). @@ -107,7 +111,7 @@ export interface SQLServerConfig2016sp2ent { auditLevel?: number; /** * Manages the fill factor server configuration option. - * When an index is created or rebuilt the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. + * When an index is created or rebuilt, the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. * * Values 0 and 100 mean full page usage (no space reserved). * @@ -116,6 +120,7 @@ export interface SQLServerConfig2016sp2ent { fillFactorPercent?: number; /** * Determines whether plans should be cached only after second execution. + * * Allows to avoid SQL cache bloat because of single-use plans. * * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option?view=sql-server-2016). 
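
Before the new 2017 and 2019 config modules below, here is a short sketch of how the UpdateHosts call added to the cluster service earlier in this patch might be fed: the request is assembled from the new UpdateClusterHostsRequest and UpdateHostSpec messages, with a field mask naming the fields to apply. The import path, the placeholder identifiers, and the snake_case mask path are assumptions; only the message shapes come from this patch.

import { UpdateClusterHostsRequest } from "./src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service";

// Hypothetical request: enable a public IP on a single host of an existing cluster.
const request = UpdateClusterHostsRequest.fromPartial({
  clusterId: "<cluster-id>",   // from a ClusterService.List response
  updateHostSpecs: [
    {
      hostName: "<host-fqdn>", // from a ClusterService.ListHosts response
      assignPublicIp: true,
      // Only the fields named in the mask are applied; the path is assumed to
      // use the proto field name.
      updateMask: { paths: ["assign_public_ip"] },
    },
  ],
});
// The request is then passed to ClusterServiceClient.updateHosts(), which returns
// an Operation, as declared in the client interface added above.
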
diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2017.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2017.ts new file mode 100644 index 00000000..d1de7892 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2017.ts @@ -0,0 +1,761 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1.config"; + +/** + * SQL Server 2017 Standard edition supported configuration options are listed here. + * + * Detailed description for each set of options is available in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/server-configuration-options-sql-server?view=sql-server-2017). + * + * Any options that are not listed here are not supported. + */ +export interface SQLServerConfig2017std { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2017std"; + /** + * Limits the number of processors to use in parallel plan execution per task. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option?view=sql-server-2017). + */ + maxDegreeOfParallelism?: number; + /** + * Specifies the threshold at which SQL Server creates and runs parallel plans for queries. + * + * SQL Server creates and runs a parallel plan for a query only when the estimated cost to run a serial plan for the same query is higher than the value of the option. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-cost-threshold-for-parallelism-server-configuration-option?view=sql-server-2017). + */ + costThresholdForParallelism?: number; + /** + * Describes how to configure login auditing to monitor SQL Server Database Engine login activity. + * + * Possible values: + * * 0 - do not log login attempts; + * * 1 - log only failed login attempts; + * * 2 - log only successful login attempts (not recommended); + * * 3 - log all login attempts (not recommended). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/ssms/configure-login-auditing-sql-server-management-studio?view=sql-server-2017). + */ + auditLevel?: number; + /** + * Manages the fill factor server configuration option. + * + * When an index is created or rebuilt, the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. + * + * Values 0 and 100 mean full page usage (no space reserved). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-fill-factor-server-configuration-option?view=sql-server-2017). + */ + fillFactorPercent?: number; + /** + * Determines whether plans should be cached only after second execution. + * + * Allows to avoid SQL cache bloat because of single-use plans. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option?view=sql-server-2017). 
+ */ + optimizeForAdHocWorkloads?: boolean; +} + +export interface SQLServerConfigSet2017std { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2017std"; + /** Effective settings for an SQL Server 2017 cluster (a combination of settings defined in [user_config] and [default_config]). */ + effectiveConfig?: SQLServerConfig2017std; + /** User-defined settings for an SQL Server 2017 cluster. */ + userConfig?: SQLServerConfig2017std; + /** Default configuration for an SQL Server 2017 cluster. */ + defaultConfig?: SQLServerConfig2017std; +} + +/** + * SQL Server 2017 Enterprise edition supported configuration options are listed here. + * + * Detailed description for each set of options is available in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/server-configuration-options-sql-server?view=sql-server-2017). + * + * Any options that are not listed here are not supported. + */ +export interface SQLServerConfig2017ent { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2017ent"; + /** + * Limits the number of processors to use in parallel plan execution per task. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option?view=sql-server-2017). + */ + maxDegreeOfParallelism?: number; + /** + * Specifies the threshold at which SQL Server creates and runs parallel plans for queries. + * + * SQL Server creates and runs a parallel plan for a query only when the estimated cost to run a serial plan for the same query is higher than the value of the option. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-cost-threshold-for-parallelism-server-configuration-option?view=sql-server-2017). + */ + costThresholdForParallelism?: number; + /** + * Describes how to configure login auditing to monitor SQL Server Database Engine login activity. + * + * Possible values: + * * 0 - do not log login attempts; + * * 1 - log only failed login attempts; + * * 2 - log only successful login attempts (not recommended); + * * 3 - log all login attempts (not recommended). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/ssms/configure-login-auditing-sql-server-management-studio?view=sql-server-2017). + */ + auditLevel?: number; + /** + * Manages the fill factor server configuration option. + * When an index is created or rebuilt, the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. + * + * Values 0 and 100 mean full page usage (no space reserved). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-fill-factor-server-configuration-option?view=sql-server-2017). + */ + fillFactorPercent?: number; + /** + * Determines whether plans should be cached only after second execution. + * + * Allows to avoid SQL cache bloat because of single-use plans. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option?view=sql-server-2017). 
+ */ + optimizeForAdHocWorkloads?: boolean; +} + +export interface SQLServerConfigSet2017ent { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2017ent"; + /** Effective settings for an SQL Server 2017 cluster (a combination of settings defined in [user_config] and [default_config]). */ + effectiveConfig?: SQLServerConfig2017ent; + /** User-defined settings for an SQL Server 2017 cluster. */ + userConfig?: SQLServerConfig2017ent; + /** Default configuration for an SQL Server 2017 cluster. */ + defaultConfig?: SQLServerConfig2017ent; +} + +const baseSQLServerConfig2017std: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2017std", +}; + +export const SQLServerConfig2017std = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2017std" as const, + + encode( + message: SQLServerConfig2017std, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxDegreeOfParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxDegreeOfParallelism!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.costThresholdForParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.costThresholdForParallelism!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.auditLevel !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.auditLevel! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.fillFactorPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fillFactorPercent!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.optimizeForAdHocWorkloads !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.optimizeForAdHocWorkloads!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfig2017std { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSQLServerConfig2017std } as SQLServerConfig2017std; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxDegreeOfParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.costThresholdForParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.auditLevel = Int64Value.decode(reader, reader.uint32()).value; + break; + case 4: + message.fillFactorPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.optimizeForAdHocWorkloads = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfig2017std { + const message = { ...baseSQLServerConfig2017std } as SQLServerConfig2017std; + message.maxDegreeOfParallelism = + object.maxDegreeOfParallelism !== undefined && + object.maxDegreeOfParallelism !== null + ? Number(object.maxDegreeOfParallelism) + : undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism !== undefined && + object.costThresholdForParallelism !== null + ? 
Number(object.costThresholdForParallelism) + : undefined; + message.auditLevel = + object.auditLevel !== undefined && object.auditLevel !== null + ? Number(object.auditLevel) + : undefined; + message.fillFactorPercent = + object.fillFactorPercent !== undefined && + object.fillFactorPercent !== null + ? Number(object.fillFactorPercent) + : undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads !== undefined && + object.optimizeForAdHocWorkloads !== null + ? Boolean(object.optimizeForAdHocWorkloads) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfig2017std): unknown { + const obj: any = {}; + message.maxDegreeOfParallelism !== undefined && + (obj.maxDegreeOfParallelism = message.maxDegreeOfParallelism); + message.costThresholdForParallelism !== undefined && + (obj.costThresholdForParallelism = message.costThresholdForParallelism); + message.auditLevel !== undefined && (obj.auditLevel = message.auditLevel); + message.fillFactorPercent !== undefined && + (obj.fillFactorPercent = message.fillFactorPercent); + message.optimizeForAdHocWorkloads !== undefined && + (obj.optimizeForAdHocWorkloads = message.optimizeForAdHocWorkloads); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfig2017std { + const message = { ...baseSQLServerConfig2017std } as SQLServerConfig2017std; + message.maxDegreeOfParallelism = object.maxDegreeOfParallelism ?? undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism ?? undefined; + message.auditLevel = object.auditLevel ?? undefined; + message.fillFactorPercent = object.fillFactorPercent ?? undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(SQLServerConfig2017std.$type, SQLServerConfig2017std); + +const baseSQLServerConfigSet2017std: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2017std", +}; + +export const SQLServerConfigSet2017std = { + $type: + "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2017std" as const, + + encode( + message: SQLServerConfigSet2017std, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + SQLServerConfig2017std.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + SQLServerConfig2017std.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + SQLServerConfig2017std.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfigSet2017std { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseSQLServerConfigSet2017std, + } as SQLServerConfigSet2017std; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = SQLServerConfig2017std.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = SQLServerConfig2017std.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = SQLServerConfig2017std.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfigSet2017std { + const message = { + ...baseSQLServerConfigSet2017std, + } as SQLServerConfigSet2017std; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2017std.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? SQLServerConfig2017std.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2017std.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfigSet2017std): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? SQLServerConfig2017std.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? SQLServerConfig2017std.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? SQLServerConfig2017std.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfigSet2017std { + const message = { + ...baseSQLServerConfigSet2017std, + } as SQLServerConfigSet2017std; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2017std.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? SQLServerConfig2017std.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? 
SQLServerConfig2017std.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + SQLServerConfigSet2017std.$type, + SQLServerConfigSet2017std +); + +const baseSQLServerConfig2017ent: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2017ent", +}; + +export const SQLServerConfig2017ent = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2017ent" as const, + + encode( + message: SQLServerConfig2017ent, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxDegreeOfParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxDegreeOfParallelism!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.costThresholdForParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.costThresholdForParallelism!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.auditLevel !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.auditLevel! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.fillFactorPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fillFactorPercent!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.optimizeForAdHocWorkloads !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.optimizeForAdHocWorkloads!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfig2017ent { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSQLServerConfig2017ent } as SQLServerConfig2017ent; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxDegreeOfParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.costThresholdForParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.auditLevel = Int64Value.decode(reader, reader.uint32()).value; + break; + case 4: + message.fillFactorPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.optimizeForAdHocWorkloads = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfig2017ent { + const message = { ...baseSQLServerConfig2017ent } as SQLServerConfig2017ent; + message.maxDegreeOfParallelism = + object.maxDegreeOfParallelism !== undefined && + object.maxDegreeOfParallelism !== null + ? Number(object.maxDegreeOfParallelism) + : undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism !== undefined && + object.costThresholdForParallelism !== null + ? Number(object.costThresholdForParallelism) + : undefined; + message.auditLevel = + object.auditLevel !== undefined && object.auditLevel !== null + ? Number(object.auditLevel) + : undefined; + message.fillFactorPercent = + object.fillFactorPercent !== undefined && + object.fillFactorPercent !== null + ? 
Number(object.fillFactorPercent) + : undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads !== undefined && + object.optimizeForAdHocWorkloads !== null + ? Boolean(object.optimizeForAdHocWorkloads) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfig2017ent): unknown { + const obj: any = {}; + message.maxDegreeOfParallelism !== undefined && + (obj.maxDegreeOfParallelism = message.maxDegreeOfParallelism); + message.costThresholdForParallelism !== undefined && + (obj.costThresholdForParallelism = message.costThresholdForParallelism); + message.auditLevel !== undefined && (obj.auditLevel = message.auditLevel); + message.fillFactorPercent !== undefined && + (obj.fillFactorPercent = message.fillFactorPercent); + message.optimizeForAdHocWorkloads !== undefined && + (obj.optimizeForAdHocWorkloads = message.optimizeForAdHocWorkloads); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfig2017ent { + const message = { ...baseSQLServerConfig2017ent } as SQLServerConfig2017ent; + message.maxDegreeOfParallelism = object.maxDegreeOfParallelism ?? undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism ?? undefined; + message.auditLevel = object.auditLevel ?? undefined; + message.fillFactorPercent = object.fillFactorPercent ?? undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(SQLServerConfig2017ent.$type, SQLServerConfig2017ent); + +const baseSQLServerConfigSet2017ent: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2017ent", +}; + +export const SQLServerConfigSet2017ent = { + $type: + "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2017ent" as const, + + encode( + message: SQLServerConfigSet2017ent, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + SQLServerConfig2017ent.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + SQLServerConfig2017ent.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + SQLServerConfig2017ent.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfigSet2017ent { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSQLServerConfigSet2017ent, + } as SQLServerConfigSet2017ent; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = SQLServerConfig2017ent.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = SQLServerConfig2017ent.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = SQLServerConfig2017ent.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfigSet2017ent { + const message = { + ...baseSQLServerConfigSet2017ent, + } as SQLServerConfigSet2017ent; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? 
SQLServerConfig2017ent.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? SQLServerConfig2017ent.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2017ent.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfigSet2017ent): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? SQLServerConfig2017ent.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? SQLServerConfig2017ent.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? SQLServerConfig2017ent.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfigSet2017ent { + const message = { + ...baseSQLServerConfigSet2017ent, + } as SQLServerConfigSet2017ent; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2017ent.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? SQLServerConfig2017ent.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2017ent.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + SQLServerConfigSet2017ent.$type, + SQLServerConfigSet2017ent +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2019.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2019.ts new file mode 100644 index 00000000..e24573e0 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/config/sqlserver2019.ts @@ -0,0 +1,761 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1.config"; + +/** + * SQL Server 2019 Standard edition supported configuration options are listed here. + * + * Detailed description for each set of options is available in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/server-configuration-options-sql-server?view=sql-server-2019). + * + * Any options that are not listed here are not supported. 
+ */ +export interface SQLServerConfig2019std { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2019std"; + /** + * Limits the number of processors to use in parallel plan execution per task. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option?view=sql-server-2019). + */ + maxDegreeOfParallelism?: number; + /** + * Specifies the threshold at which SQL Server creates and runs parallel plans for queries. + * + * SQL Server creates and runs a parallel plan for a query only when the estimated cost to run a serial plan for the same query is higher than the value of the option. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-cost-threshold-for-parallelism-server-configuration-option?view=sql-server-2019). + */ + costThresholdForParallelism?: number; + /** + * Describes how to configure login auditing to monitor SQL Server Database Engine login activity. + * + * Possible values: + * * 0 - do not log login attempts; + * * 1 - log only failed login attempts; + * * 2 - log only successful login attempts (not recommended); + * * 3 - log all login attempts (not recommended). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/ssms/configure-login-auditing-sql-server-management-studio?view=sql-server-2019). + */ + auditLevel?: number; + /** + * Manages the fill factor server configuration option. + * + * When an index is created or rebuilt, the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. + * + * Values 0 and 100 mean full page usage (no space reserved). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-fill-factor-server-configuration-option?view=sql-server-2019). + */ + fillFactorPercent?: number; + /** + * Determines whether plans should be cached only after second execution. + * + * Allows to avoid SQL cache bloat because of single-use plans. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option?view=sql-server-2019). + */ + optimizeForAdHocWorkloads?: boolean; +} + +export interface SQLServerConfigSet2019std { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2019std"; + /** Effective settings for an SQL Server 2019 cluster (a combination of settings defined in [user_config] and [default_config]). */ + effectiveConfig?: SQLServerConfig2019std; + /** User-defined settings for an SQL Server 2019 cluster. */ + userConfig?: SQLServerConfig2019std; + /** Default configuration for an SQL Server 2019 cluster. */ + defaultConfig?: SQLServerConfig2019std; +} + +/** + * SQL Server 2019 Enterprise edition supported configuration options are listed here. + * + * Detailed description for each set of options is available in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/server-configuration-options-sql-server?view=sql-server-2019). + * + * Any options that are not listed here are not supported. 
+ */ +export interface SQLServerConfig2019ent { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2019ent"; + /** + * Limits the number of processors to use in parallel plan execution per task. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-max-degree-of-parallelism-server-configuration-option?view=sql-server-2019). + */ + maxDegreeOfParallelism?: number; + /** + * Specifies the threshold at which SQL Server creates and runs parallel plans for queries. + * + * SQL Server creates and runs a parallel plan for a query only when the estimated cost to run a serial plan for the same query is higher than the value of the option. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-cost-threshold-for-parallelism-server-configuration-option?view=sql-server-2019). + */ + costThresholdForParallelism?: number; + /** + * Describes how to configure login auditing to monitor SQL Server Database Engine login activity. + * + * Possible values: + * * 0 - do not log login attempts; + * * 1 - log only failed login attempts; + * * 2 - log only successful login attempts (not recommended); + * * 3 - log all login attempts (not recommended). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/ssms/configure-login-auditing-sql-server-management-studio?view=sql-server-2019). + */ + auditLevel?: number; + /** + * Manages the fill factor server configuration option. + * When an index is created or rebuilt, the fill factor determines the percentage of space on each index leaf-level page to be filled with data, reserving the rest as free space for future growth. + * + * Values 0 and 100 mean full page usage (no space reserved). + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-fill-factor-server-configuration-option?view=sql-server-2019). + */ + fillFactorPercent?: number; + /** + * Determines whether plans should be cached only after second execution. + * + * Allows to avoid SQL cache bloat because of single-use plans. + * + * See in-depth description in [SQL Server documentation](https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/optimize-for-ad-hoc-workloads-server-configuration-option?view=sql-server-2019). + */ + optimizeForAdHocWorkloads?: boolean; +} + +export interface SQLServerConfigSet2019ent { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2019ent"; + /** Effective settings for an SQL Server 2019 cluster (a combination of settings defined in [user_config] and [default_config]). */ + effectiveConfig?: SQLServerConfig2019ent; + /** User-defined settings for an SQL Server 2019 cluster. */ + userConfig?: SQLServerConfig2019ent; + /** Default configuration for an SQL Server 2019 cluster. 
*/ + defaultConfig?: SQLServerConfig2019ent; +} + +const baseSQLServerConfig2019std: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2019std", +}; + +export const SQLServerConfig2019std = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2019std" as const, + + encode( + message: SQLServerConfig2019std, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxDegreeOfParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxDegreeOfParallelism!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.costThresholdForParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.costThresholdForParallelism!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.auditLevel !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.auditLevel! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.fillFactorPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fillFactorPercent!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.optimizeForAdHocWorkloads !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.optimizeForAdHocWorkloads!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfig2019std { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSQLServerConfig2019std } as SQLServerConfig2019std; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxDegreeOfParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.costThresholdForParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.auditLevel = Int64Value.decode(reader, reader.uint32()).value; + break; + case 4: + message.fillFactorPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.optimizeForAdHocWorkloads = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfig2019std { + const message = { ...baseSQLServerConfig2019std } as SQLServerConfig2019std; + message.maxDegreeOfParallelism = + object.maxDegreeOfParallelism !== undefined && + object.maxDegreeOfParallelism !== null + ? Number(object.maxDegreeOfParallelism) + : undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism !== undefined && + object.costThresholdForParallelism !== null + ? Number(object.costThresholdForParallelism) + : undefined; + message.auditLevel = + object.auditLevel !== undefined && object.auditLevel !== null + ? Number(object.auditLevel) + : undefined; + message.fillFactorPercent = + object.fillFactorPercent !== undefined && + object.fillFactorPercent !== null + ? Number(object.fillFactorPercent) + : undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads !== undefined && + object.optimizeForAdHocWorkloads !== null + ? 
Boolean(object.optimizeForAdHocWorkloads) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfig2019std): unknown { + const obj: any = {}; + message.maxDegreeOfParallelism !== undefined && + (obj.maxDegreeOfParallelism = message.maxDegreeOfParallelism); + message.costThresholdForParallelism !== undefined && + (obj.costThresholdForParallelism = message.costThresholdForParallelism); + message.auditLevel !== undefined && (obj.auditLevel = message.auditLevel); + message.fillFactorPercent !== undefined && + (obj.fillFactorPercent = message.fillFactorPercent); + message.optimizeForAdHocWorkloads !== undefined && + (obj.optimizeForAdHocWorkloads = message.optimizeForAdHocWorkloads); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfig2019std { + const message = { ...baseSQLServerConfig2019std } as SQLServerConfig2019std; + message.maxDegreeOfParallelism = object.maxDegreeOfParallelism ?? undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism ?? undefined; + message.auditLevel = object.auditLevel ?? undefined; + message.fillFactorPercent = object.fillFactorPercent ?? undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(SQLServerConfig2019std.$type, SQLServerConfig2019std); + +const baseSQLServerConfigSet2019std: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2019std", +}; + +export const SQLServerConfigSet2019std = { + $type: + "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2019std" as const, + + encode( + message: SQLServerConfigSet2019std, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + SQLServerConfig2019std.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + SQLServerConfig2019std.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + SQLServerConfig2019std.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfigSet2019std { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSQLServerConfigSet2019std, + } as SQLServerConfigSet2019std; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = SQLServerConfig2019std.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = SQLServerConfig2019std.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = SQLServerConfig2019std.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfigSet2019std { + const message = { + ...baseSQLServerConfigSet2019std, + } as SQLServerConfigSet2019std; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2019std.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
SQLServerConfig2019std.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2019std.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfigSet2019std): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? SQLServerConfig2019std.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? SQLServerConfig2019std.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? SQLServerConfig2019std.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfigSet2019std { + const message = { + ...baseSQLServerConfigSet2019std, + } as SQLServerConfigSet2019std; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2019std.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? SQLServerConfig2019std.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2019std.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + SQLServerConfigSet2019std.$type, + SQLServerConfigSet2019std +); + +const baseSQLServerConfig2019ent: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2019ent", +}; + +export const SQLServerConfig2019ent = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfig2019ent" as const, + + encode( + message: SQLServerConfig2019ent, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxDegreeOfParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxDegreeOfParallelism!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.costThresholdForParallelism !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.costThresholdForParallelism!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.auditLevel !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.auditLevel! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.fillFactorPercent !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fillFactorPercent!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.optimizeForAdHocWorkloads !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.optimizeForAdHocWorkloads!, + }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfig2019ent { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSQLServerConfig2019ent } as SQLServerConfig2019ent; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxDegreeOfParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.costThresholdForParallelism = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.auditLevel = Int64Value.decode(reader, reader.uint32()).value; + break; + case 4: + message.fillFactorPercent = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.optimizeForAdHocWorkloads = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfig2019ent { + const message = { ...baseSQLServerConfig2019ent } as SQLServerConfig2019ent; + message.maxDegreeOfParallelism = + object.maxDegreeOfParallelism !== undefined && + object.maxDegreeOfParallelism !== null + ? Number(object.maxDegreeOfParallelism) + : undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism !== undefined && + object.costThresholdForParallelism !== null + ? Number(object.costThresholdForParallelism) + : undefined; + message.auditLevel = + object.auditLevel !== undefined && object.auditLevel !== null + ? Number(object.auditLevel) + : undefined; + message.fillFactorPercent = + object.fillFactorPercent !== undefined && + object.fillFactorPercent !== null + ? Number(object.fillFactorPercent) + : undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads !== undefined && + object.optimizeForAdHocWorkloads !== null + ? Boolean(object.optimizeForAdHocWorkloads) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfig2019ent): unknown { + const obj: any = {}; + message.maxDegreeOfParallelism !== undefined && + (obj.maxDegreeOfParallelism = message.maxDegreeOfParallelism); + message.costThresholdForParallelism !== undefined && + (obj.costThresholdForParallelism = message.costThresholdForParallelism); + message.auditLevel !== undefined && (obj.auditLevel = message.auditLevel); + message.fillFactorPercent !== undefined && + (obj.fillFactorPercent = message.fillFactorPercent); + message.optimizeForAdHocWorkloads !== undefined && + (obj.optimizeForAdHocWorkloads = message.optimizeForAdHocWorkloads); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfig2019ent { + const message = { ...baseSQLServerConfig2019ent } as SQLServerConfig2019ent; + message.maxDegreeOfParallelism = object.maxDegreeOfParallelism ?? undefined; + message.costThresholdForParallelism = + object.costThresholdForParallelism ?? undefined; + message.auditLevel = object.auditLevel ?? undefined; + message.fillFactorPercent = object.fillFactorPercent ?? undefined; + message.optimizeForAdHocWorkloads = + object.optimizeForAdHocWorkloads ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(SQLServerConfig2019ent.$type, SQLServerConfig2019ent); + +const baseSQLServerConfigSet2019ent: object = { + $type: "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2019ent", +}; + +export const SQLServerConfigSet2019ent = { + $type: + "yandex.cloud.mdb.sqlserver.v1.config.SQLServerConfigSet2019ent" as const, + + encode( + message: SQLServerConfigSet2019ent, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + SQLServerConfig2019ent.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + SQLServerConfig2019ent.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + SQLServerConfig2019ent.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SQLServerConfigSet2019ent { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSQLServerConfigSet2019ent, + } as SQLServerConfigSet2019ent; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = SQLServerConfig2019ent.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = SQLServerConfig2019ent.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = SQLServerConfig2019ent.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SQLServerConfigSet2019ent { + const message = { + ...baseSQLServerConfigSet2019ent, + } as SQLServerConfigSet2019ent; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2019ent.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? SQLServerConfig2019ent.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2019ent.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: SQLServerConfigSet2019ent): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? SQLServerConfig2019ent.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? SQLServerConfig2019ent.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? SQLServerConfig2019ent.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SQLServerConfigSet2019ent { + const message = { + ...baseSQLServerConfigSet2019ent, + } as SQLServerConfigSet2019ent; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? SQLServerConfig2019ent.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? 
SQLServerConfig2019ent.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? SQLServerConfig2019ent.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + SQLServerConfigSet2019ent.$type, + SQLServerConfigSet2019ent +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/database.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/database.ts index aa03b52b..3a824569 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/database.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/database.ts @@ -7,13 +7,14 @@ export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1"; /** * An SQL Server database. + * * For more information, see the [Concepts](/docs/managed-sqlserver/concepts) section of the documentation. */ export interface Database { $type: "yandex.cloud.mdb.sqlserver.v1.Database"; /** Name of the database. */ name: string; - /** ID of the SQL Server cluster the database belongs to. */ + /** ID of the SQL Server cluster that the database belongs to. */ clusterId: string; } diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts index e652a3d8..9d57287e 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/database_service.ts @@ -48,15 +48,12 @@ export interface ListDatabasesRequest { */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListDatabasesResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListDatabasesResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, Set `page_token` to the [ListDatabasesResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListDatabasesResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -65,10 +62,11 @@ export interface ListDatabasesResponse { /** List of SQL Server databases. */ databases: Database[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListDatabasesRequest.page_size], use the `next_page_token` as the value - * for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own `next_page_token` to continue paging through the results. 
+ * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value for the [ListDatabasesRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -120,54 +118,56 @@ export interface DeleteDatabaseMetadata { export interface RestoreDatabaseRequest { $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseRequest"; /** - * Required. ID of the SQL Server cluster to restore a database in. - * To get the cluster ID, use a [ClusterService.List] request + * ID of the SQL Server cluster to restore a database in. + * + * To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Name of the SQLServer database that is being restored. */ + /** Name of the SQL Server database that is being restored. */ databaseName: string; - /** name of the database which backup will be used to restore the database */ + /** Name of the database which backup is used to restore the database. */ fromDatabase: string; - /** ID of a backup to be used */ + /** ID of a backup to be used. */ backupId: string; - /** Timestamp which is used for Point-in-Time recovery */ + /** Timestamp which is used for Point-in-Time recovery. */ time?: Date; } export interface RestoreDatabaseMetadata { $type: "yandex.cloud.mdb.sqlserver.v1.RestoreDatabaseMetadata"; - /** ID of the SQLServer cluster where a database is being created. */ + /** ID of the SQL Server cluster where a database is being created. */ clusterId: string; - /** Name of the SQLServer database that is being created. */ + /** Name of an SQL Server database that is being created. */ databaseName: string; - /** name of the database which backup will be used to restore the database */ + /** Name of the database which backup is used to restore the database. */ fromDatabase: string; - /** ID of a backup to be used */ + /** ID of a backup to be used. */ backupId: string; } export interface ImportDatabaseBackupRequest { $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupRequest"; /** - * Required. ID of the SQL Server cluster to import a database in. - * To get the cluster ID, use a [ClusterService.List] request + * ID of the SQL Server cluster to import a database in. + * + * To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Name of the SQLServer database that is being imported. */ + /** Name of the SQL Server database that is being imported. */ databaseName: string; /** Name of object storage bucket to import backups from. */ s3Bucket: string; /** Path in object storage bucket to import backups from. */ s3Path: string; - /** List of .bak files in bucket containing database backup */ + /** List of .bak files in bucket containing database backup. */ files: string[]; } export interface ImportDatabaseBackupMetadata { $type: "yandex.cloud.mdb.sqlserver.v1.ImportDatabaseBackupMetadata"; - /** ID of the SQLServer cluster where a database is being imported. */ + /** ID of the SQL Server cluster where a database is being imported. */ clusterId: string; - /** Name of the SQLServer database that is being imported. */ + /** Name of the SQL Server database that is being imported. */ databaseName: string; /** Name of object storage bucket to import backups from. 
*/ s3Bucket: string; @@ -178,29 +178,30 @@ export interface ImportDatabaseBackupMetadata { export interface ExportDatabaseBackupRequest { $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupRequest"; /** - * Required. ID of the SQL Server cluster to export a database from. - * To get the cluster ID, use a [ClusterService.List] request + * ID of the SQL Server cluster to export a database from. + * + * To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Name of the SQLServer database that is being exported. */ + /** Name of the SQL Server database that is being exported. */ databaseName: string; - /** Name of object storage bucket to export backups to */ + /** Name of object storage bucket to export backups to. */ s3Bucket: string; /** Path in object storage bucket to export backups to. */ s3Path: string; - /** Prefix for .bak files to */ + /** Prefix for .bak files to export. */ prefix: string; } export interface ExportDatabaseBackupMetadata { $type: "yandex.cloud.mdb.sqlserver.v1.ExportDatabaseBackupMetadata"; - /** ID of the SQLServer cluster where a database is being exported. */ + /** ID of the SQL Server cluster where a database is being exported. */ clusterId: string; - /** Name of the SQLServer database that is being exported. */ + /** Name of the SQL Server database that is being exported. */ databaseName: string; - /** Name of object storage bucket to import backups from. */ + /** Name of object storage bucket to export backups to. */ s3Bucket: string; - /** Path in object storage bucket to import backups from. */ + /** Path in object storage bucket to export backups to. */ s3Path: string; } @@ -1541,7 +1542,7 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Creates a new SQL Server database in the specified cluster from a backup */ + /** Creates a new SQL Server database in the specified cluster from a backup. */ restore: { path: "/yandex.cloud.mdb.sqlserver.v1.DatabaseService/Restore", requestStream: false, @@ -1553,7 +1554,7 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Imports a new SQL Server database from external backup */ + /** Imports a new SQL Server database from an external backup. */ importBackup: { path: "/yandex.cloud.mdb.sqlserver.v1.DatabaseService/ImportBackup", requestStream: false, @@ -1566,7 +1567,7 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Exports database backup to external backup */ + /** Exports the last database backup to an external backup. */ exportBackup: { path: "/yandex.cloud.mdb.sqlserver.v1.DatabaseService/ExportBackup", requestStream: false, @@ -1604,11 +1605,11 @@ export interface DatabaseServiceServer extends UntypedServiceImplementation { list: handleUnaryCall; /** Creates a new SQL Server database in the specified cluster. */ create: handleUnaryCall; - /** Creates a new SQL Server database in the specified cluster from a backup */ + /** Creates a new SQL Server database in the specified cluster from a backup. */ restore: handleUnaryCall; - /** Imports a new SQL Server database from external backup */ + /** Imports a new SQL Server database from an external backup. 
*/ importBackup: handleUnaryCall; - /** Exports database backup to external backup */ + /** Exports the last database backup to an external backup. */ exportBackup: handleUnaryCall; /** Deletes the specified SQL Server database. */ delete: handleUnaryCall; @@ -1676,7 +1677,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Creates a new SQL Server database in the specified cluster from a backup */ + /** Creates a new SQL Server database in the specified cluster from a backup. */ restore( request: RestoreDatabaseRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1692,7 +1693,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Imports a new SQL Server database from external backup */ + /** Imports a new SQL Server database from an external backup. */ importBackup( request: ImportDatabaseBackupRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1708,7 +1709,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Exports database backup to external backup */ + /** Exports the last database backup to an external backup. */ exportBackup( request: ExportDatabaseBackupRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/resource_preset_service.ts index cb459dbf..a3b1fac3 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/resource_preset_service.ts @@ -31,15 +31,12 @@ export interface GetResourcePresetRequest { export interface ListResourcePresetsRequest { $type: "yandex.cloud.mdb.sqlserver.v1.ListResourcePresetsRequest"; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListResourcePresetsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -48,10 +45,11 @@ export interface ListResourcePresetsResponse { /** List of resource presets. */ resourcePresets: ResourcePreset[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListResourcePresetsRequest.page_size], use the `next_page_token` as the value - * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own `next_page_token` to continue paging through the results. 
+ * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value for the [ListResourcePresetsRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/user.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/user.ts index 1dad10f5..2fef2ede 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/user.ts @@ -5,6 +5,46 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.sqlserver.v1"; +/** Set of server roles. */ +export enum ServerRole { + SERVER_ROLE_UNSPECIFIED = 0, + /** + * MDB_MONITOR - Effectively grants VIEW SERVER STATE to the login. + * + * That gives an ability to use various dynamic management views to monitor server state, including Activity Monitor tool that is built-in into SSMS. + * + * No intrusive actions are allowed, so this is pretty safe to grant. + */ + MDB_MONITOR = 1, + UNRECOGNIZED = -1, +} + +export function serverRoleFromJSON(object: any): ServerRole { + switch (object) { + case 0: + case "SERVER_ROLE_UNSPECIFIED": + return ServerRole.SERVER_ROLE_UNSPECIFIED; + case 1: + case "MDB_MONITOR": + return ServerRole.MDB_MONITOR; + case -1: + case "UNRECOGNIZED": + default: + return ServerRole.UNRECOGNIZED; + } +} + +export function serverRoleToJSON(object: ServerRole): string { + switch (object) { + case ServerRole.SERVER_ROLE_UNSPECIFIED: + return "SERVER_ROLE_UNSPECIFIED"; + case ServerRole.MDB_MONITOR: + return "MDB_MONITOR"; + default: + return "UNKNOWN"; + } +} + /** An SQL Server user. */ export interface User { $type: "yandex.cloud.mdb.sqlserver.v1.User"; @@ -14,6 +54,8 @@ export interface User { clusterId: string; /** Set of permissions granted to the user. */ permissions: Permission[]; + /** Set of server roles granted to the login. */ + serverRoles: ServerRole[]; } export interface Permission { @@ -24,13 +66,14 @@ export interface Permission { roles: Permission_Role[]; } +/** Role granted to the user within the database. */ export enum Permission_Role { ROLE_UNSPECIFIED = 0, - /** DB_OWNER - Members of this fixed database role can perform all configuration and maintenance activities on the database, and can also drop the database in SQL Server. */ + /** DB_OWNER - Members of this fixed database role can perform all configuration and maintenance activities on a database and can also drop a database in SQL Server. */ DB_OWNER = 1, /** DB_SECURITYADMIN - Members of this fixed database role can modify role membership for custom roles only and manage permissions. They can potentially elevate their privileges and their actions should be monitored. */ DB_SECURITYADMIN = 2, - /** DB_ACCESSADMIN - Members of this fixed database role can add or remove access to the database for Windows logins, Windows groups, and SQL Server logins. */ + /** DB_ACCESSADMIN - Members of this fixed database role can add or remove access to a database for Windows logins, Windows groups, and SQL Server logins. */ DB_ACCESSADMIN = 3, /** DB_BACKUPOPERATOR - Members of this fixed database role can back up the database. 
*/ DB_BACKUPOPERATOR = 4, @@ -40,9 +83,9 @@ export enum Permission_Role { DB_DATAWRITER = 6, /** DB_DATAREADER - Members of this fixed database role can read all data from all user tables. */ DB_DATAREADER = 7, - /** DB_DENYDATAWRITER - Members of this fixed database role cannot add, modify, or delete any data in the user tables within a database. Denial has a higher priority than a grant, so you can use this role to quickly restrict one's privileges without explicitly revoking permissions or roles. */ + /** DB_DENYDATAWRITER - Members of this fixed database role cannot add, modify, or delete any data in the user tables within a database. A denial has a higher priority than a grant, so you can use this role to quickly restrict one's privileges without explicitly revoking permissions or roles. */ DB_DENYDATAWRITER = 8, - /** DB_DENYDATAREADER - Members of this fixed database role cannot read any data in the user tables within a database. Denial has a higher priority than a grant, so you can use this role to quickly restrict one's privileges without explicitly revoking permissions or roles. */ + /** DB_DENYDATAREADER - Members of this fixed database role cannot read any data in the user tables within a database. A denial has a higher priority than a grant, so you can use this role to quickly restrict one's privileges without explicitly revoking permissions or roles. */ DB_DENYDATAREADER = 9, UNRECOGNIZED = -1, } @@ -121,12 +164,15 @@ export interface UserSpec { password: string; /** Set of permissions to grant to the user. */ permissions: Permission[]; + /** Set of server roles. */ + serverRoles: ServerRole[]; } const baseUser: object = { $type: "yandex.cloud.mdb.sqlserver.v1.User", name: "", clusterId: "", + serverRoles: 0, }; export const User = { @@ -142,6 +188,11 @@ export const User = { for (const v of message.permissions) { Permission.encode(v!, writer.uint32(26).fork()).ldelim(); } + writer.uint32(34).fork(); + for (const v of message.serverRoles) { + writer.int32(v); + } + writer.ldelim(); return writer; }, @@ -150,6 +201,7 @@ export const User = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseUser } as User; message.permissions = []; + message.serverRoles = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -162,6 +214,16 @@ export const User = { case 3: message.permissions.push(Permission.decode(reader, reader.uint32())); break; + case 4: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.serverRoles.push(reader.int32() as any); + } + } else { + message.serverRoles.push(reader.int32() as any); + } + break; default: reader.skipType(tag & 7); break; @@ -183,6 +245,9 @@ export const User = { message.permissions = (object.permissions ?? []).map((e: any) => Permission.fromJSON(e) ); + message.serverRoles = (object.serverRoles ?? []).map((e: any) => + serverRoleFromJSON(e) + ); return message; }, @@ -197,6 +262,11 @@ export const User = { } else { obj.permissions = []; } + if (message.serverRoles) { + obj.serverRoles = message.serverRoles.map((e) => serverRoleToJSON(e)); + } else { + obj.serverRoles = []; + } return obj; }, @@ -206,6 +276,7 @@ export const User = { message.clusterId = object.clusterId ?? 
""; message.permissions = object.permissions?.map((e) => Permission.fromPartial(e)) || []; + message.serverRoles = object.serverRoles?.map((e) => e) || []; return message; }, }; @@ -305,6 +376,7 @@ const baseUserSpec: object = { $type: "yandex.cloud.mdb.sqlserver.v1.UserSpec", name: "", password: "", + serverRoles: 0, }; export const UserSpec = { @@ -323,6 +395,11 @@ export const UserSpec = { for (const v of message.permissions) { Permission.encode(v!, writer.uint32(26).fork()).ldelim(); } + writer.uint32(34).fork(); + for (const v of message.serverRoles) { + writer.int32(v); + } + writer.ldelim(); return writer; }, @@ -331,6 +408,7 @@ export const UserSpec = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseUserSpec } as UserSpec; message.permissions = []; + message.serverRoles = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -343,6 +421,16 @@ export const UserSpec = { case 3: message.permissions.push(Permission.decode(reader, reader.uint32())); break; + case 4: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.serverRoles.push(reader.int32() as any); + } + } else { + message.serverRoles.push(reader.int32() as any); + } + break; default: reader.skipType(tag & 7); break; @@ -364,6 +452,9 @@ export const UserSpec = { message.permissions = (object.permissions ?? []).map((e: any) => Permission.fromJSON(e) ); + message.serverRoles = (object.serverRoles ?? []).map((e: any) => + serverRoleFromJSON(e) + ); return message; }, @@ -378,6 +469,11 @@ export const UserSpec = { } else { obj.permissions = []; } + if (message.serverRoles) { + obj.serverRoles = message.serverRoles.map((e) => serverRoleToJSON(e)); + } else { + obj.serverRoles = []; + } return obj; }, @@ -387,6 +483,7 @@ export const UserSpec = { message.password = object.password ?? ""; message.permissions = object.permissions?.map((e) => Permission.fromPartial(e)) || []; + message.serverRoles = object.serverRoles?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/user_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/user_service.ts index dd29603a..ad3d06f2 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/user_service.ts @@ -18,6 +18,9 @@ import { UserSpec, Permission, User, + ServerRole, + serverRoleFromJSON, + serverRoleToJSON, } from "../../../../../yandex/cloud/mdb/sqlserver/v1/user"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; @@ -49,15 +52,12 @@ export interface ListUsersRequest { */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than `page_size`, the service returns a [ListUsersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set `page_token` to the [ListUsersResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. 
To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by the previous list request. */ pageToken: string; } @@ -66,10 +66,11 @@ export interface ListUsersResponse { /** Requested list of SQL Server users. */ users: User[]; /** - * Token that allows you to get the next page of results for list requests. If the number of results - * is larger than [ListUsersRequest.page_size], use the `next_page_token` as the value - * for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own `next_page_token` to continue paging through the results. + * Token that allows you to get the next page of results for list requests. + * + * If the number of results is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value for the [ListUsersRequest.page_token] parameter in the next list request. + * + * Each subsequent list request has its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } @@ -114,6 +115,8 @@ export interface UpdateUserRequest { password: string; /** New set of permissions for the user. */ permissions: Permission[]; + /** New set of server roles granted to the login. */ + serverRoles: ServerRole[]; } export interface UpdateUserMetadata { @@ -158,6 +161,7 @@ export interface GrantUserPermissionRequest { clusterId: string; /** * Name of the user to grant the permission to. + * * To get the name of the user, use a [UserService.List] request. */ userName: string; @@ -604,6 +608,7 @@ const baseUpdateUserRequest: object = { clusterId: "", userName: "", password: "", + serverRoles: 0, }; export const UpdateUserRequest = { @@ -628,6 +633,11 @@ export const UpdateUserRequest = { for (const v of message.permissions) { Permission.encode(v!, writer.uint32(42).fork()).ldelim(); } + writer.uint32(50).fork(); + for (const v of message.serverRoles) { + writer.int32(v); + } + writer.ldelim(); return writer; }, @@ -636,6 +646,7 @@ export const UpdateUserRequest = { let end = length === undefined ? reader.len : reader.pos + length; const message = { ...baseUpdateUserRequest } as UpdateUserRequest; message.permissions = []; + message.serverRoles = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -654,6 +665,16 @@ export const UpdateUserRequest = { case 5: message.permissions.push(Permission.decode(reader, reader.uint32())); break; + case 6: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.serverRoles.push(reader.int32() as any); + } + } else { + message.serverRoles.push(reader.int32() as any); + } + break; default: reader.skipType(tag & 7); break; @@ -683,6 +704,9 @@ export const UpdateUserRequest = { message.permissions = (object.permissions ?? []).map((e: any) => Permission.fromJSON(e) ); + message.serverRoles = (object.serverRoles ?? []).map((e: any) => + serverRoleFromJSON(e) + ); return message; }, @@ -702,6 +726,11 @@ export const UpdateUserRequest = { } else { obj.permissions = []; } + if (message.serverRoles) { + obj.serverRoles = message.serverRoles.map((e) => serverRoleToJSON(e)); + } else { + obj.serverRoles = []; + } return obj; }, @@ -718,6 +747,7 @@ export const UpdateUserRequest = { message.password = object.password ?? 
""; message.permissions = object.permissions?.map((e) => Permission.fromPartial(e)) || []; + message.serverRoles = object.serverRoles?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/monitoring/index.ts b/src/generated/yandex/cloud/monitoring/index.ts new file mode 100644 index 00000000..9c576d0e --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/index.ts @@ -0,0 +1,9 @@ +export * as chart_widget from './v3/chart_widget' +export * as dashboard from './v3/dashboard' +export * as dashboard_service from './v3/dashboard_service' +export * as downsampling from './v3/downsampling' +export * as parametrization from './v3/parametrization' +export * as text_widget from './v3/text_widget' +export * as title_widget from './v3/title_widget' +export * as unit_format from './v3/unit_format' +export * as widget from './v3/widget' \ No newline at end of file diff --git a/src/generated/yandex/cloud/monitoring/v3/chart_widget.ts b/src/generated/yandex/cloud/monitoring/v3/chart_widget.ts new file mode 100644 index 00000000..7fafb1dd --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/chart_widget.ts @@ -0,0 +1,2471 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Downsampling } from "../../../../yandex/cloud/monitoring/v3/downsampling"; +import { + UnitFormat, + unitFormatFromJSON, + unitFormatToJSON, +} from "../../../../yandex/cloud/monitoring/v3/unit_format"; +import { Int64Value } from "../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** Chart widget. */ +export interface ChartWidget { + $type: "yandex.cloud.monitoring.v3.ChartWidget"; + /** Required. Chart ID. */ + id: string; + /** Queries. */ + queries?: ChartWidget_Queries; + /** Visualization settings. */ + visualizationSettings?: ChartWidget_VisualizationSettings; + /** Override settings. */ + seriesOverrides: ChartWidget_SeriesOverrides[]; + /** Name hiding settings. */ + nameHidingSettings?: ChartWidget_NameHidingSettings; + /** Chart description in dashboard (not enabled in UI). */ + description: string; + /** Chart widget title. */ + title: string; + /** Enable legend under chart. */ + displayLegend: boolean; + /** Fixed time interval for chart. */ + freeze: ChartWidget_FreezeDuration; +} + +export enum ChartWidget_FreezeDuration { + FREEZE_DURATION_UNSPECIFIED = 0, + /** FREEZE_DURATION_HOUR - Last hour. */ + FREEZE_DURATION_HOUR = 1, + /** FREEZE_DURATION_DAY - Last day = last 24 hours. */ + FREEZE_DURATION_DAY = 2, + /** FREEZE_DURATION_WEEK - Last 7 days. */ + FREEZE_DURATION_WEEK = 3, + /** FREEZE_DURATION_MONTH - Last 31 days. 
*/ + FREEZE_DURATION_MONTH = 4, + UNRECOGNIZED = -1, +} + +export function chartWidget_FreezeDurationFromJSON( + object: any +): ChartWidget_FreezeDuration { + switch (object) { + case 0: + case "FREEZE_DURATION_UNSPECIFIED": + return ChartWidget_FreezeDuration.FREEZE_DURATION_UNSPECIFIED; + case 1: + case "FREEZE_DURATION_HOUR": + return ChartWidget_FreezeDuration.FREEZE_DURATION_HOUR; + case 2: + case "FREEZE_DURATION_DAY": + return ChartWidget_FreezeDuration.FREEZE_DURATION_DAY; + case 3: + case "FREEZE_DURATION_WEEK": + return ChartWidget_FreezeDuration.FREEZE_DURATION_WEEK; + case 4: + case "FREEZE_DURATION_MONTH": + return ChartWidget_FreezeDuration.FREEZE_DURATION_MONTH; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_FreezeDuration.UNRECOGNIZED; + } +} + +export function chartWidget_FreezeDurationToJSON( + object: ChartWidget_FreezeDuration +): string { + switch (object) { + case ChartWidget_FreezeDuration.FREEZE_DURATION_UNSPECIFIED: + return "FREEZE_DURATION_UNSPECIFIED"; + case ChartWidget_FreezeDuration.FREEZE_DURATION_HOUR: + return "FREEZE_DURATION_HOUR"; + case ChartWidget_FreezeDuration.FREEZE_DURATION_DAY: + return "FREEZE_DURATION_DAY"; + case ChartWidget_FreezeDuration.FREEZE_DURATION_WEEK: + return "FREEZE_DURATION_WEEK"; + case ChartWidget_FreezeDuration.FREEZE_DURATION_MONTH: + return "FREEZE_DURATION_MONTH"; + default: + return "UNKNOWN"; + } +} + +/** Query settings. */ +export interface ChartWidget_Queries { + $type: "yandex.cloud.monitoring.v3.ChartWidget.Queries"; + /** Required. List of targets. */ + targets: ChartWidget_Queries_Target[]; + /** Required. Downsampling settings. */ + downsampling?: Downsampling; +} + +/** Query target. */ +export interface ChartWidget_Queries_Target { + $type: "yandex.cloud.monitoring.v3.ChartWidget.Queries.Target"; + /** Required. Query. */ + query: string; + /** Text mode. */ + textMode: boolean; + /** Checks that target is visible or invisible. */ + hidden: boolean; +} + +/** Visualization settings. */ +export interface ChartWidget_VisualizationSettings { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings"; + /** Visualization type. */ + type: ChartWidget_VisualizationSettings_VisualizationType; + /** Normalize. */ + normalize: boolean; + /** Interpolate. */ + interpolate: ChartWidget_VisualizationSettings_Interpolate; + /** Aggregation. */ + aggregation: ChartWidget_VisualizationSettings_SeriesAggregation; + /** Color scheme settings. */ + colorSchemeSettings?: ChartWidget_VisualizationSettings_ColorSchemeSettings; + /** Heatmap settings. */ + heatmapSettings?: ChartWidget_VisualizationSettings_HeatmapSettings; + /** Y axis settings. */ + yaxisSettings?: ChartWidget_VisualizationSettings_YaxisSettings; + /** Inside chart title. */ + title: string; + /** Show chart labels. */ + showLabels: boolean; +} + +/** Chart visualization type. */ +export enum ChartWidget_VisualizationSettings_VisualizationType { + /** VISUALIZATION_TYPE_UNSPECIFIED - Not specified (line by default). */ + VISUALIZATION_TYPE_UNSPECIFIED = 0, + /** VISUALIZATION_TYPE_LINE - Line chart. */ + VISUALIZATION_TYPE_LINE = 1, + /** VISUALIZATION_TYPE_STACK - Stack chart. */ + VISUALIZATION_TYPE_STACK = 2, + /** VISUALIZATION_TYPE_COLUMN - Points as columns chart. */ + VISUALIZATION_TYPE_COLUMN = 3, + /** VISUALIZATION_TYPE_POINTS - Points. */ + VISUALIZATION_TYPE_POINTS = 4, + /** VISUALIZATION_TYPE_PIE - Pie aggregation chart. */ + VISUALIZATION_TYPE_PIE = 5, + /** VISUALIZATION_TYPE_BARS - Bars aggregation chart. 
*/ + VISUALIZATION_TYPE_BARS = 6, + /** VISUALIZATION_TYPE_DISTRIBUTION - Distribution aggregation chart. */ + VISUALIZATION_TYPE_DISTRIBUTION = 7, + /** VISUALIZATION_TYPE_HEATMAP - Heatmap aggregation chart. */ + VISUALIZATION_TYPE_HEATMAP = 8, + UNRECOGNIZED = -1, +} + +export function chartWidget_VisualizationSettings_VisualizationTypeFromJSON( + object: any +): ChartWidget_VisualizationSettings_VisualizationType { + switch (object) { + case 0: + case "VISUALIZATION_TYPE_UNSPECIFIED": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_UNSPECIFIED; + case 1: + case "VISUALIZATION_TYPE_LINE": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_LINE; + case 2: + case "VISUALIZATION_TYPE_STACK": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_STACK; + case 3: + case "VISUALIZATION_TYPE_COLUMN": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_COLUMN; + case 4: + case "VISUALIZATION_TYPE_POINTS": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_POINTS; + case 5: + case "VISUALIZATION_TYPE_PIE": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_PIE; + case 6: + case "VISUALIZATION_TYPE_BARS": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_BARS; + case 7: + case "VISUALIZATION_TYPE_DISTRIBUTION": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_DISTRIBUTION; + case 8: + case "VISUALIZATION_TYPE_HEATMAP": + return ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_HEATMAP; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_VisualizationSettings_VisualizationType.UNRECOGNIZED; + } +} + +export function chartWidget_VisualizationSettings_VisualizationTypeToJSON( + object: ChartWidget_VisualizationSettings_VisualizationType +): string { + switch (object) { + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_UNSPECIFIED: + return "VISUALIZATION_TYPE_UNSPECIFIED"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_LINE: + return "VISUALIZATION_TYPE_LINE"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_STACK: + return "VISUALIZATION_TYPE_STACK"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_COLUMN: + return "VISUALIZATION_TYPE_COLUMN"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_POINTS: + return "VISUALIZATION_TYPE_POINTS"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_PIE: + return "VISUALIZATION_TYPE_PIE"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_BARS: + return "VISUALIZATION_TYPE_BARS"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_DISTRIBUTION: + return "VISUALIZATION_TYPE_DISTRIBUTION"; + case ChartWidget_VisualizationSettings_VisualizationType.VISUALIZATION_TYPE_HEATMAP: + return "VISUALIZATION_TYPE_HEATMAP"; + default: + return "UNKNOWN"; + } +} + +export enum ChartWidget_VisualizationSettings_Interpolate { + /** INTERPOLATE_UNSPECIFIED - Not specified (linear by default). */ + INTERPOLATE_UNSPECIFIED = 0, + /** INTERPOLATE_LINEAR - Linear. */ + INTERPOLATE_LINEAR = 1, + /** INTERPOLATE_LEFT - Left. */ + INTERPOLATE_LEFT = 2, + /** INTERPOLATE_RIGHT - Right. 
*/ + INTERPOLATE_RIGHT = 3, + UNRECOGNIZED = -1, +} + +export function chartWidget_VisualizationSettings_InterpolateFromJSON( + object: any +): ChartWidget_VisualizationSettings_Interpolate { + switch (object) { + case 0: + case "INTERPOLATE_UNSPECIFIED": + return ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_UNSPECIFIED; + case 1: + case "INTERPOLATE_LINEAR": + return ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_LINEAR; + case 2: + case "INTERPOLATE_LEFT": + return ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_LEFT; + case 3: + case "INTERPOLATE_RIGHT": + return ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_RIGHT; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_VisualizationSettings_Interpolate.UNRECOGNIZED; + } +} + +export function chartWidget_VisualizationSettings_InterpolateToJSON( + object: ChartWidget_VisualizationSettings_Interpolate +): string { + switch (object) { + case ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_UNSPECIFIED: + return "INTERPOLATE_UNSPECIFIED"; + case ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_LINEAR: + return "INTERPOLATE_LINEAR"; + case ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_LEFT: + return "INTERPOLATE_LEFT"; + case ChartWidget_VisualizationSettings_Interpolate.INTERPOLATE_RIGHT: + return "INTERPOLATE_RIGHT"; + default: + return "UNKNOWN"; + } +} + +/** + * Y axis type. + * N.B. _TYPE prefix is necessary to expect name clash with Interpolate LINEAR value. + */ +export enum ChartWidget_VisualizationSettings_YaxisType { + /** YAXIS_TYPE_UNSPECIFIED - Not specified (linear by default). */ + YAXIS_TYPE_UNSPECIFIED = 0, + /** YAXIS_TYPE_LINEAR - Linear. */ + YAXIS_TYPE_LINEAR = 1, + /** YAXIS_TYPE_LOGARITHMIC - Logarithmic. */ + YAXIS_TYPE_LOGARITHMIC = 2, + UNRECOGNIZED = -1, +} + +export function chartWidget_VisualizationSettings_YaxisTypeFromJSON( + object: any +): ChartWidget_VisualizationSettings_YaxisType { + switch (object) { + case 0: + case "YAXIS_TYPE_UNSPECIFIED": + return ChartWidget_VisualizationSettings_YaxisType.YAXIS_TYPE_UNSPECIFIED; + case 1: + case "YAXIS_TYPE_LINEAR": + return ChartWidget_VisualizationSettings_YaxisType.YAXIS_TYPE_LINEAR; + case 2: + case "YAXIS_TYPE_LOGARITHMIC": + return ChartWidget_VisualizationSettings_YaxisType.YAXIS_TYPE_LOGARITHMIC; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_VisualizationSettings_YaxisType.UNRECOGNIZED; + } +} + +export function chartWidget_VisualizationSettings_YaxisTypeToJSON( + object: ChartWidget_VisualizationSettings_YaxisType +): string { + switch (object) { + case ChartWidget_VisualizationSettings_YaxisType.YAXIS_TYPE_UNSPECIFIED: + return "YAXIS_TYPE_UNSPECIFIED"; + case ChartWidget_VisualizationSettings_YaxisType.YAXIS_TYPE_LINEAR: + return "YAXIS_TYPE_LINEAR"; + case ChartWidget_VisualizationSettings_YaxisType.YAXIS_TYPE_LOGARITHMIC: + return "YAXIS_TYPE_LOGARITHMIC"; + default: + return "UNKNOWN"; + } +} + +export enum ChartWidget_VisualizationSettings_SeriesAggregation { + /** SERIES_AGGREGATION_UNSPECIFIED - Not specified (avg by default). */ + SERIES_AGGREGATION_UNSPECIFIED = 0, + /** SERIES_AGGREGATION_AVG - Average. */ + SERIES_AGGREGATION_AVG = 1, + /** SERIES_AGGREGATION_MIN - Minimum. */ + SERIES_AGGREGATION_MIN = 2, + /** SERIES_AGGREGATION_MAX - Maximum. */ + SERIES_AGGREGATION_MAX = 3, + /** SERIES_AGGREGATION_LAST - Last non-NaN value. */ + SERIES_AGGREGATION_LAST = 4, + /** SERIES_AGGREGATION_SUM - Sum. 
*/ + SERIES_AGGREGATION_SUM = 5, + UNRECOGNIZED = -1, +} + +export function chartWidget_VisualizationSettings_SeriesAggregationFromJSON( + object: any +): ChartWidget_VisualizationSettings_SeriesAggregation { + switch (object) { + case 0: + case "SERIES_AGGREGATION_UNSPECIFIED": + return ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_UNSPECIFIED; + case 1: + case "SERIES_AGGREGATION_AVG": + return ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_AVG; + case 2: + case "SERIES_AGGREGATION_MIN": + return ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_MIN; + case 3: + case "SERIES_AGGREGATION_MAX": + return ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_MAX; + case 4: + case "SERIES_AGGREGATION_LAST": + return ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_LAST; + case 5: + case "SERIES_AGGREGATION_SUM": + return ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_SUM; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_VisualizationSettings_SeriesAggregation.UNRECOGNIZED; + } +} + +export function chartWidget_VisualizationSettings_SeriesAggregationToJSON( + object: ChartWidget_VisualizationSettings_SeriesAggregation +): string { + switch (object) { + case ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_UNSPECIFIED: + return "SERIES_AGGREGATION_UNSPECIFIED"; + case ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_AVG: + return "SERIES_AGGREGATION_AVG"; + case ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_MIN: + return "SERIES_AGGREGATION_MIN"; + case ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_MAX: + return "SERIES_AGGREGATION_MAX"; + case ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_LAST: + return "SERIES_AGGREGATION_LAST"; + case ChartWidget_VisualizationSettings_SeriesAggregation.SERIES_AGGREGATION_SUM: + return "SERIES_AGGREGATION_SUM"; + default: + return "UNKNOWN"; + } +} + +export interface ChartWidget_VisualizationSettings_ColorSchemeSettings { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings"; + /** Automatic color scheme. */ + automatic?: + | ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme + | undefined; + /** Standard color scheme. */ + standard?: + | ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme + | undefined; + /** Gradient color scheme. */ + gradient?: + | ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme + | undefined; +} + +export interface ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.AutomaticColorScheme"; +} + +export interface ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.StandardColorScheme"; +} + +export interface ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.GradientColorScheme"; + /** Gradient green value. */ + greenValue: string; + /** Gradient yellow value. */ + yellowValue: string; + /** Gradient red value. */ + redValue: string; + /** Gradient violet_value. 
*/ + violetValue: string; +} + +export interface ChartWidget_VisualizationSettings_HeatmapSettings { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.HeatmapSettings"; + /** Heatmap green value. */ + greenValue: string; + /** Heatmap yellow value. */ + yellowValue: string; + /** Heatmap red value. */ + redValue: string; + /** Heatmap violet_value. */ + violetValue: string; +} + +/** Y axis settings. */ +export interface ChartWidget_VisualizationSettings_Yaxis { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.Yaxis"; + /** Type. */ + type: ChartWidget_VisualizationSettings_YaxisType; + /** Title or empty. */ + title: string; + /** Min value in extended number format or empty. */ + min: string; + /** Max value in extended number format or empty. */ + max: string; + /** Unit format. */ + unitFormat: UnitFormat; + /** Tick value precision (null as default, 0-7 in other cases). */ + precision?: number; +} + +export interface ChartWidget_VisualizationSettings_YaxisSettings { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.YaxisSettings"; + /** Left Y axis settings. */ + left?: ChartWidget_VisualizationSettings_Yaxis; + /** Right Y axis settings. */ + right?: ChartWidget_VisualizationSettings_Yaxis; +} + +/** Series override settings. */ +export interface ChartWidget_SeriesOverrides { + $type: "yandex.cloud.monitoring.v3.ChartWidget.SeriesOverrides"; + /** Series name. */ + name: string | undefined; + /** Target index. */ + targetIndex: string | undefined; + /** Required. Override settings. */ + settings?: ChartWidget_SeriesOverrides_SeriesOverrideSettings; +} + +export enum ChartWidget_SeriesOverrides_YaxisPosition { + /** YAXIS_POSITION_UNSPECIFIED - Not specified (left by default). */ + YAXIS_POSITION_UNSPECIFIED = 0, + /** YAXIS_POSITION_LEFT - Left. */ + YAXIS_POSITION_LEFT = 1, + /** YAXIS_POSITION_RIGHT - Right. */ + YAXIS_POSITION_RIGHT = 2, + UNRECOGNIZED = -1, +} + +export function chartWidget_SeriesOverrides_YaxisPositionFromJSON( + object: any +): ChartWidget_SeriesOverrides_YaxisPosition { + switch (object) { + case 0: + case "YAXIS_POSITION_UNSPECIFIED": + return ChartWidget_SeriesOverrides_YaxisPosition.YAXIS_POSITION_UNSPECIFIED; + case 1: + case "YAXIS_POSITION_LEFT": + return ChartWidget_SeriesOverrides_YaxisPosition.YAXIS_POSITION_LEFT; + case 2: + case "YAXIS_POSITION_RIGHT": + return ChartWidget_SeriesOverrides_YaxisPosition.YAXIS_POSITION_RIGHT; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_SeriesOverrides_YaxisPosition.UNRECOGNIZED; + } +} + +export function chartWidget_SeriesOverrides_YaxisPositionToJSON( + object: ChartWidget_SeriesOverrides_YaxisPosition +): string { + switch (object) { + case ChartWidget_SeriesOverrides_YaxisPosition.YAXIS_POSITION_UNSPECIFIED: + return "YAXIS_POSITION_UNSPECIFIED"; + case ChartWidget_SeriesOverrides_YaxisPosition.YAXIS_POSITION_LEFT: + return "YAXIS_POSITION_LEFT"; + case ChartWidget_SeriesOverrides_YaxisPosition.YAXIS_POSITION_RIGHT: + return "YAXIS_POSITION_RIGHT"; + default: + return "UNKNOWN"; + } +} + +export enum ChartWidget_SeriesOverrides_SeriesVisualizationType { + /** SERIES_VISUALIZATION_TYPE_UNSPECIFIED - Not specified (line by default). */ + SERIES_VISUALIZATION_TYPE_UNSPECIFIED = 0, + /** SERIES_VISUALIZATION_TYPE_LINE - Line chart. */ + SERIES_VISUALIZATION_TYPE_LINE = 1, + /** SERIES_VISUALIZATION_TYPE_STACK - Stack chart. */ + SERIES_VISUALIZATION_TYPE_STACK = 2, + /** SERIES_VISUALIZATION_TYPE_COLUMN - Points as columns chart. 
*/ + SERIES_VISUALIZATION_TYPE_COLUMN = 3, + /** SERIES_VISUALIZATION_TYPE_POINTS - Points. */ + SERIES_VISUALIZATION_TYPE_POINTS = 4, + UNRECOGNIZED = -1, +} + +export function chartWidget_SeriesOverrides_SeriesVisualizationTypeFromJSON( + object: any +): ChartWidget_SeriesOverrides_SeriesVisualizationType { + switch (object) { + case 0: + case "SERIES_VISUALIZATION_TYPE_UNSPECIFIED": + return ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_UNSPECIFIED; + case 1: + case "SERIES_VISUALIZATION_TYPE_LINE": + return ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_LINE; + case 2: + case "SERIES_VISUALIZATION_TYPE_STACK": + return ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_STACK; + case 3: + case "SERIES_VISUALIZATION_TYPE_COLUMN": + return ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_COLUMN; + case 4: + case "SERIES_VISUALIZATION_TYPE_POINTS": + return ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_POINTS; + case -1: + case "UNRECOGNIZED": + default: + return ChartWidget_SeriesOverrides_SeriesVisualizationType.UNRECOGNIZED; + } +} + +export function chartWidget_SeriesOverrides_SeriesVisualizationTypeToJSON( + object: ChartWidget_SeriesOverrides_SeriesVisualizationType +): string { + switch (object) { + case ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_UNSPECIFIED: + return "SERIES_VISUALIZATION_TYPE_UNSPECIFIED"; + case ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_LINE: + return "SERIES_VISUALIZATION_TYPE_LINE"; + case ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_STACK: + return "SERIES_VISUALIZATION_TYPE_STACK"; + case ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_COLUMN: + return "SERIES_VISUALIZATION_TYPE_COLUMN"; + case ChartWidget_SeriesOverrides_SeriesVisualizationType.SERIES_VISUALIZATION_TYPE_POINTS: + return "SERIES_VISUALIZATION_TYPE_POINTS"; + default: + return "UNKNOWN"; + } +} + +export interface ChartWidget_SeriesOverrides_SeriesOverrideSettings { + $type: "yandex.cloud.monitoring.v3.ChartWidget.SeriesOverrides.SeriesOverrideSettings"; + /** Series name or empty. */ + name: string; + /** Series color or empty. */ + color: string; + /** Type. */ + type: ChartWidget_SeriesOverrides_SeriesVisualizationType; + /** Stack name or empty. */ + stackName: string; + /** Stack grow down. */ + growDown: boolean; + /** Yaxis position. */ + yaxisPosition: ChartWidget_SeriesOverrides_YaxisPosition; +} + +/** Name hiding settings. */ +export interface ChartWidget_NameHidingSettings { + $type: "yandex.cloud.monitoring.v3.ChartWidget.NameHidingSettings"; + /** True if we want to show concrete series names only, false if we want to hide concrete series names. */ + positive: boolean; + /** Series names to show or hide. 
*/ + names: string[]; +} + +const baseChartWidget: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget", + id: "", + description: "", + title: "", + displayLegend: false, + freeze: 0, +}; + +export const ChartWidget = { + $type: "yandex.cloud.monitoring.v3.ChartWidget" as const, + + encode( + message: ChartWidget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.queries !== undefined) { + ChartWidget_Queries.encode( + message.queries, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.visualizationSettings !== undefined) { + ChartWidget_VisualizationSettings.encode( + message.visualizationSettings, + writer.uint32(26).fork() + ).ldelim(); + } + for (const v of message.seriesOverrides) { + ChartWidget_SeriesOverrides.encode(v!, writer.uint32(34).fork()).ldelim(); + } + if (message.nameHidingSettings !== undefined) { + ChartWidget_NameHidingSettings.encode( + message.nameHidingSettings, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.description !== "") { + writer.uint32(50).string(message.description); + } + if (message.title !== "") { + writer.uint32(58).string(message.title); + } + if (message.displayLegend === true) { + writer.uint32(64).bool(message.displayLegend); + } + if (message.freeze !== 0) { + writer.uint32(72).int32(message.freeze); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ChartWidget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseChartWidget } as ChartWidget; + message.seriesOverrides = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.queries = ChartWidget_Queries.decode(reader, reader.uint32()); + break; + case 3: + message.visualizationSettings = + ChartWidget_VisualizationSettings.decode(reader, reader.uint32()); + break; + case 4: + message.seriesOverrides.push( + ChartWidget_SeriesOverrides.decode(reader, reader.uint32()) + ); + break; + case 5: + message.nameHidingSettings = ChartWidget_NameHidingSettings.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.description = reader.string(); + break; + case 7: + message.title = reader.string(); + break; + case 8: + message.displayLegend = reader.bool(); + break; + case 9: + message.freeze = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget { + const message = { ...baseChartWidget } as ChartWidget; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.queries = + object.queries !== undefined && object.queries !== null + ? ChartWidget_Queries.fromJSON(object.queries) + : undefined; + message.visualizationSettings = + object.visualizationSettings !== undefined && + object.visualizationSettings !== null + ? ChartWidget_VisualizationSettings.fromJSON( + object.visualizationSettings + ) + : undefined; + message.seriesOverrides = (object.seriesOverrides ?? []).map((e: any) => + ChartWidget_SeriesOverrides.fromJSON(e) + ); + message.nameHidingSettings = + object.nameHidingSettings !== undefined && + object.nameHidingSettings !== null + ? 
ChartWidget_NameHidingSettings.fromJSON(object.nameHidingSettings) + : undefined; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.displayLegend = + object.displayLegend !== undefined && object.displayLegend !== null + ? Boolean(object.displayLegend) + : false; + message.freeze = + object.freeze !== undefined && object.freeze !== null + ? chartWidget_FreezeDurationFromJSON(object.freeze) + : 0; + return message; + }, + + toJSON(message: ChartWidget): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.queries !== undefined && + (obj.queries = message.queries + ? ChartWidget_Queries.toJSON(message.queries) + : undefined); + message.visualizationSettings !== undefined && + (obj.visualizationSettings = message.visualizationSettings + ? ChartWidget_VisualizationSettings.toJSON( + message.visualizationSettings + ) + : undefined); + if (message.seriesOverrides) { + obj.seriesOverrides = message.seriesOverrides.map((e) => + e ? ChartWidget_SeriesOverrides.toJSON(e) : undefined + ); + } else { + obj.seriesOverrides = []; + } + message.nameHidingSettings !== undefined && + (obj.nameHidingSettings = message.nameHidingSettings + ? ChartWidget_NameHidingSettings.toJSON(message.nameHidingSettings) + : undefined); + message.description !== undefined && + (obj.description = message.description); + message.title !== undefined && (obj.title = message.title); + message.displayLegend !== undefined && + (obj.displayLegend = message.displayLegend); + message.freeze !== undefined && + (obj.freeze = chartWidget_FreezeDurationToJSON(message.freeze)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ChartWidget { + const message = { ...baseChartWidget } as ChartWidget; + message.id = object.id ?? ""; + message.queries = + object.queries !== undefined && object.queries !== null + ? ChartWidget_Queries.fromPartial(object.queries) + : undefined; + message.visualizationSettings = + object.visualizationSettings !== undefined && + object.visualizationSettings !== null + ? ChartWidget_VisualizationSettings.fromPartial( + object.visualizationSettings + ) + : undefined; + message.seriesOverrides = + object.seriesOverrides?.map((e) => + ChartWidget_SeriesOverrides.fromPartial(e) + ) || []; + message.nameHidingSettings = + object.nameHidingSettings !== undefined && + object.nameHidingSettings !== null + ? ChartWidget_NameHidingSettings.fromPartial(object.nameHidingSettings) + : undefined; + message.description = object.description ?? ""; + message.title = object.title ?? ""; + message.displayLegend = object.displayLegend ?? false; + message.freeze = object.freeze ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ChartWidget.$type, ChartWidget); + +const baseChartWidget_Queries: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.Queries", +}; + +export const ChartWidget_Queries = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.Queries" as const, + + encode( + message: ChartWidget_Queries, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.targets) { + ChartWidget_Queries_Target.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.downsampling !== undefined) { + Downsampling.encode( + message.downsampling, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ChartWidget_Queries { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseChartWidget_Queries } as ChartWidget_Queries; + message.targets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.targets.push( + ChartWidget_Queries_Target.decode(reader, reader.uint32()) + ); + break; + case 2: + message.downsampling = Downsampling.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_Queries { + const message = { ...baseChartWidget_Queries } as ChartWidget_Queries; + message.targets = (object.targets ?? []).map((e: any) => + ChartWidget_Queries_Target.fromJSON(e) + ); + message.downsampling = + object.downsampling !== undefined && object.downsampling !== null + ? Downsampling.fromJSON(object.downsampling) + : undefined; + return message; + }, + + toJSON(message: ChartWidget_Queries): unknown { + const obj: any = {}; + if (message.targets) { + obj.targets = message.targets.map((e) => + e ? ChartWidget_Queries_Target.toJSON(e) : undefined + ); + } else { + obj.targets = []; + } + message.downsampling !== undefined && + (obj.downsampling = message.downsampling + ? Downsampling.toJSON(message.downsampling) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ChartWidget_Queries { + const message = { ...baseChartWidget_Queries } as ChartWidget_Queries; + message.targets = + object.targets?.map((e) => ChartWidget_Queries_Target.fromPartial(e)) || + []; + message.downsampling = + object.downsampling !== undefined && object.downsampling !== null + ? Downsampling.fromPartial(object.downsampling) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ChartWidget_Queries.$type, ChartWidget_Queries); + +const baseChartWidget_Queries_Target: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.Queries.Target", + query: "", + textMode: false, + hidden: false, +}; + +export const ChartWidget_Queries_Target = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.Queries.Target" as const, + + encode( + message: ChartWidget_Queries_Target, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.query !== "") { + writer.uint32(10).string(message.query); + } + if (message.textMode === true) { + writer.uint32(16).bool(message.textMode); + } + if (message.hidden === true) { + writer.uint32(24).bool(message.hidden); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_Queries_Target { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseChartWidget_Queries_Target, + } as ChartWidget_Queries_Target; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.query = reader.string(); + break; + case 2: + message.textMode = reader.bool(); + break; + case 3: + message.hidden = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_Queries_Target { + const message = { + ...baseChartWidget_Queries_Target, + } as ChartWidget_Queries_Target; + message.query = + object.query !== undefined && object.query !== null + ? String(object.query) + : ""; + message.textMode = + object.textMode !== undefined && object.textMode !== null + ? Boolean(object.textMode) + : false; + message.hidden = + object.hidden !== undefined && object.hidden !== null + ? Boolean(object.hidden) + : false; + return message; + }, + + toJSON(message: ChartWidget_Queries_Target): unknown { + const obj: any = {}; + message.query !== undefined && (obj.query = message.query); + message.textMode !== undefined && (obj.textMode = message.textMode); + message.hidden !== undefined && (obj.hidden = message.hidden); + return obj; + }, + + fromPartial, I>>( + object: I + ): ChartWidget_Queries_Target { + const message = { + ...baseChartWidget_Queries_Target, + } as ChartWidget_Queries_Target; + message.query = object.query ?? ""; + message.textMode = object.textMode ?? false; + message.hidden = object.hidden ?? false; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_Queries_Target.$type, + ChartWidget_Queries_Target +); + +const baseChartWidget_VisualizationSettings: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings", + type: 0, + normalize: false, + interpolate: 0, + aggregation: 0, + title: "", + showLabels: false, +}; + +export const ChartWidget_VisualizationSettings = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings" as const, + + encode( + message: ChartWidget_VisualizationSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.type !== 0) { + writer.uint32(8).int32(message.type); + } + if (message.normalize === true) { + writer.uint32(16).bool(message.normalize); + } + if (message.interpolate !== 0) { + writer.uint32(24).int32(message.interpolate); + } + if (message.aggregation !== 0) { + writer.uint32(32).int32(message.aggregation); + } + if (message.colorSchemeSettings !== undefined) { + ChartWidget_VisualizationSettings_ColorSchemeSettings.encode( + message.colorSchemeSettings, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.heatmapSettings !== undefined) { + ChartWidget_VisualizationSettings_HeatmapSettings.encode( + message.heatmapSettings, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.yaxisSettings !== undefined) { + ChartWidget_VisualizationSettings_YaxisSettings.encode( + message.yaxisSettings, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.title !== "") { + writer.uint32(66).string(message.title); + } + if (message.showLabels === true) { + writer.uint32(72).bool(message.showLabels); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings, + } as ChartWidget_VisualizationSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32() as any; + break; + case 2: + message.normalize = reader.bool(); + break; + case 3: + message.interpolate = reader.int32() as any; + break; + case 4: + message.aggregation = reader.int32() as any; + break; + case 5: + message.colorSchemeSettings = + ChartWidget_VisualizationSettings_ColorSchemeSettings.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.heatmapSettings = + ChartWidget_VisualizationSettings_HeatmapSettings.decode( + reader, + reader.uint32() + ); + break; + case 7: + message.yaxisSettings = + ChartWidget_VisualizationSettings_YaxisSettings.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.title = reader.string(); + break; + case 9: + message.showLabels = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_VisualizationSettings { + const message = { + ...baseChartWidget_VisualizationSettings, + } as ChartWidget_VisualizationSettings; + message.type = + object.type !== undefined && object.type !== null + ? chartWidget_VisualizationSettings_VisualizationTypeFromJSON( + object.type + ) + : 0; + message.normalize = + object.normalize !== undefined && object.normalize !== null + ? Boolean(object.normalize) + : false; + message.interpolate = + object.interpolate !== undefined && object.interpolate !== null + ? chartWidget_VisualizationSettings_InterpolateFromJSON( + object.interpolate + ) + : 0; + message.aggregation = + object.aggregation !== undefined && object.aggregation !== null + ? chartWidget_VisualizationSettings_SeriesAggregationFromJSON( + object.aggregation + ) + : 0; + message.colorSchemeSettings = + object.colorSchemeSettings !== undefined && + object.colorSchemeSettings !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings.fromJSON( + object.colorSchemeSettings + ) + : undefined; + message.heatmapSettings = + object.heatmapSettings !== undefined && object.heatmapSettings !== null + ? ChartWidget_VisualizationSettings_HeatmapSettings.fromJSON( + object.heatmapSettings + ) + : undefined; + message.yaxisSettings = + object.yaxisSettings !== undefined && object.yaxisSettings !== null + ? ChartWidget_VisualizationSettings_YaxisSettings.fromJSON( + object.yaxisSettings + ) + : undefined; + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.showLabels = + object.showLabels !== undefined && object.showLabels !== null + ? Boolean(object.showLabels) + : false; + return message; + }, + + toJSON(message: ChartWidget_VisualizationSettings): unknown { + const obj: any = {}; + message.type !== undefined && + (obj.type = chartWidget_VisualizationSettings_VisualizationTypeToJSON( + message.type + )); + message.normalize !== undefined && (obj.normalize = message.normalize); + message.interpolate !== undefined && + (obj.interpolate = chartWidget_VisualizationSettings_InterpolateToJSON( + message.interpolate + )); + message.aggregation !== undefined && + (obj.aggregation = + chartWidget_VisualizationSettings_SeriesAggregationToJSON( + message.aggregation + )); + message.colorSchemeSettings !== undefined && + (obj.colorSchemeSettings = message.colorSchemeSettings + ? 
ChartWidget_VisualizationSettings_ColorSchemeSettings.toJSON( + message.colorSchemeSettings + ) + : undefined); + message.heatmapSettings !== undefined && + (obj.heatmapSettings = message.heatmapSettings + ? ChartWidget_VisualizationSettings_HeatmapSettings.toJSON( + message.heatmapSettings + ) + : undefined); + message.yaxisSettings !== undefined && + (obj.yaxisSettings = message.yaxisSettings + ? ChartWidget_VisualizationSettings_YaxisSettings.toJSON( + message.yaxisSettings + ) + : undefined); + message.title !== undefined && (obj.title = message.title); + message.showLabels !== undefined && (obj.showLabels = message.showLabels); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ChartWidget_VisualizationSettings { + const message = { + ...baseChartWidget_VisualizationSettings, + } as ChartWidget_VisualizationSettings; + message.type = object.type ?? 0; + message.normalize = object.normalize ?? false; + message.interpolate = object.interpolate ?? 0; + message.aggregation = object.aggregation ?? 0; + message.colorSchemeSettings = + object.colorSchemeSettings !== undefined && + object.colorSchemeSettings !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings.fromPartial( + object.colorSchemeSettings + ) + : undefined; + message.heatmapSettings = + object.heatmapSettings !== undefined && object.heatmapSettings !== null + ? ChartWidget_VisualizationSettings_HeatmapSettings.fromPartial( + object.heatmapSettings + ) + : undefined; + message.yaxisSettings = + object.yaxisSettings !== undefined && object.yaxisSettings !== null + ? ChartWidget_VisualizationSettings_YaxisSettings.fromPartial( + object.yaxisSettings + ) + : undefined; + message.title = object.title ?? ""; + message.showLabels = object.showLabels ?? false; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings.$type, + ChartWidget_VisualizationSettings +); + +const baseChartWidget_VisualizationSettings_ColorSchemeSettings: object = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings", +}; + +export const ChartWidget_VisualizationSettings_ColorSchemeSettings = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings" as const, + + encode( + message: ChartWidget_VisualizationSettings_ColorSchemeSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.automatic !== undefined) { + ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme.encode( + message.automatic, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.standard !== undefined) { + ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme.encode( + message.standard, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.gradient !== undefined) { + ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme.encode( + message.gradient, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_ColorSchemeSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.automatic = + ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.standard = + ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.gradient = + ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_VisualizationSettings_ColorSchemeSettings { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings; + message.automatic = + object.automatic !== undefined && object.automatic !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme.fromJSON( + object.automatic + ) + : undefined; + message.standard = + object.standard !== undefined && object.standard !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme.fromJSON( + object.standard + ) + : undefined; + message.gradient = + object.gradient !== undefined && object.gradient !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme.fromJSON( + object.gradient + ) + : undefined; + return message; + }, + + toJSON( + message: ChartWidget_VisualizationSettings_ColorSchemeSettings + ): unknown { + const obj: any = {}; + message.automatic !== undefined && + (obj.automatic = message.automatic + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme.toJSON( + message.automatic + ) + : undefined); + message.standard !== undefined && + (obj.standard = message.standard + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme.toJSON( + message.standard + ) + : undefined); + message.gradient !== undefined && + (obj.gradient = message.gradient + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme.toJSON( + message.gradient + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ChartWidget_VisualizationSettings_ColorSchemeSettings { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings; + message.automatic = + object.automatic !== undefined && object.automatic !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme.fromPartial( + object.automatic + ) + : undefined; + message.standard = + object.standard !== undefined && object.standard !== null + ? ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme.fromPartial( + object.standard + ) + : undefined; + message.gradient = + object.gradient !== undefined && object.gradient !== null + ? 
ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme.fromPartial( + object.gradient + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_ColorSchemeSettings.$type, + ChartWidget_VisualizationSettings_ColorSchemeSettings +); + +const baseChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme: object = + { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.AutomaticColorScheme", + }; + +export const ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme = + { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.AutomaticColorScheme" as const, + + encode( + _: ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme { + const reader = + input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + _: any + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme; + return message; + }, + + toJSON( + _: ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme + ): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >( + _: I + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme; + return message; + }, + }; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme.$type, + ChartWidget_VisualizationSettings_ColorSchemeSettings_AutomaticColorScheme +); + +const baseChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme: object = + { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.StandardColorScheme", + }; + +export const ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme = + { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.StandardColorScheme" as const, + + encode( + _: ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme { + const reader = + input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + _: any + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme; + return message; + }, + + toJSON( + _: ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme + ): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >( + _: I + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme; + return message; + }, + }; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme.$type, + ChartWidget_VisualizationSettings_ColorSchemeSettings_StandardColorScheme +); + +const baseChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme: object = + { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.GradientColorScheme", + greenValue: "", + yellowValue: "", + redValue: "", + violetValue: "", + }; + +export const ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme = + { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.ColorSchemeSettings.GradientColorScheme" as const, + + encode( + message: ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.greenValue !== "") { + writer.uint32(18).string(message.greenValue); + } + if (message.yellowValue !== "") { + writer.uint32(26).string(message.yellowValue); + } + if (message.redValue !== "") { + writer.uint32(34).string(message.redValue); + } + if (message.violetValue !== "") { + writer.uint32(42).string(message.violetValue); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme { + const reader = + input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.greenValue = reader.string(); + break; + case 3: + message.yellowValue = reader.string(); + break; + case 4: + message.redValue = reader.string(); + break; + case 5: + message.violetValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme; + message.greenValue = + object.greenValue !== undefined && object.greenValue !== null + ? String(object.greenValue) + : ""; + message.yellowValue = + object.yellowValue !== undefined && object.yellowValue !== null + ? String(object.yellowValue) + : ""; + message.redValue = + object.redValue !== undefined && object.redValue !== null + ? String(object.redValue) + : ""; + message.violetValue = + object.violetValue !== undefined && object.violetValue !== null + ? String(object.violetValue) + : ""; + return message; + }, + + toJSON( + message: ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme + ): unknown { + const obj: any = {}; + message.greenValue !== undefined && (obj.greenValue = message.greenValue); + message.yellowValue !== undefined && + (obj.yellowValue = message.yellowValue); + message.redValue !== undefined && (obj.redValue = message.redValue); + message.violetValue !== undefined && + (obj.violetValue = message.violetValue); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >( + object: I + ): ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme { + const message = { + ...baseChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme, + } as ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme; + message.greenValue = object.greenValue ?? ""; + message.yellowValue = object.yellowValue ?? ""; + message.redValue = object.redValue ?? ""; + message.violetValue = object.violetValue ?? 
""; + return message; + }, + }; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme.$type, + ChartWidget_VisualizationSettings_ColorSchemeSettings_GradientColorScheme +); + +const baseChartWidget_VisualizationSettings_HeatmapSettings: object = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.HeatmapSettings", + greenValue: "", + yellowValue: "", + redValue: "", + violetValue: "", +}; + +export const ChartWidget_VisualizationSettings_HeatmapSettings = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.HeatmapSettings" as const, + + encode( + message: ChartWidget_VisualizationSettings_HeatmapSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.greenValue !== "") { + writer.uint32(18).string(message.greenValue); + } + if (message.yellowValue !== "") { + writer.uint32(26).string(message.yellowValue); + } + if (message.redValue !== "") { + writer.uint32(34).string(message.redValue); + } + if (message.violetValue !== "") { + writer.uint32(42).string(message.violetValue); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_HeatmapSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_HeatmapSettings, + } as ChartWidget_VisualizationSettings_HeatmapSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.greenValue = reader.string(); + break; + case 3: + message.yellowValue = reader.string(); + break; + case 4: + message.redValue = reader.string(); + break; + case 5: + message.violetValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_VisualizationSettings_HeatmapSettings { + const message = { + ...baseChartWidget_VisualizationSettings_HeatmapSettings, + } as ChartWidget_VisualizationSettings_HeatmapSettings; + message.greenValue = + object.greenValue !== undefined && object.greenValue !== null + ? String(object.greenValue) + : ""; + message.yellowValue = + object.yellowValue !== undefined && object.yellowValue !== null + ? String(object.yellowValue) + : ""; + message.redValue = + object.redValue !== undefined && object.redValue !== null + ? String(object.redValue) + : ""; + message.violetValue = + object.violetValue !== undefined && object.violetValue !== null + ? String(object.violetValue) + : ""; + return message; + }, + + toJSON(message: ChartWidget_VisualizationSettings_HeatmapSettings): unknown { + const obj: any = {}; + message.greenValue !== undefined && (obj.greenValue = message.greenValue); + message.yellowValue !== undefined && + (obj.yellowValue = message.yellowValue); + message.redValue !== undefined && (obj.redValue = message.redValue); + message.violetValue !== undefined && + (obj.violetValue = message.violetValue); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ChartWidget_VisualizationSettings_HeatmapSettings { + const message = { + ...baseChartWidget_VisualizationSettings_HeatmapSettings, + } as ChartWidget_VisualizationSettings_HeatmapSettings; + message.greenValue = object.greenValue ?? ""; + message.yellowValue = object.yellowValue ?? ""; + message.redValue = object.redValue ?? 
""; + message.violetValue = object.violetValue ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_HeatmapSettings.$type, + ChartWidget_VisualizationSettings_HeatmapSettings +); + +const baseChartWidget_VisualizationSettings_Yaxis: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.Yaxis", + type: 0, + title: "", + min: "", + max: "", + unitFormat: 0, +}; + +export const ChartWidget_VisualizationSettings_Yaxis = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.Yaxis" as const, + + encode( + message: ChartWidget_VisualizationSettings_Yaxis, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.type !== 0) { + writer.uint32(8).int32(message.type); + } + if (message.title !== "") { + writer.uint32(18).string(message.title); + } + if (message.min !== "") { + writer.uint32(26).string(message.min); + } + if (message.max !== "") { + writer.uint32(34).string(message.max); + } + if (message.unitFormat !== 0) { + writer.uint32(40).int32(message.unitFormat); + } + if (message.precision !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.precision! }, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_Yaxis { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_Yaxis, + } as ChartWidget_VisualizationSettings_Yaxis; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32() as any; + break; + case 2: + message.title = reader.string(); + break; + case 3: + message.min = reader.string(); + break; + case 4: + message.max = reader.string(); + break; + case 5: + message.unitFormat = reader.int32() as any; + break; + case 6: + message.precision = Int64Value.decode(reader, reader.uint32()).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_VisualizationSettings_Yaxis { + const message = { + ...baseChartWidget_VisualizationSettings_Yaxis, + } as ChartWidget_VisualizationSettings_Yaxis; + message.type = + object.type !== undefined && object.type !== null + ? chartWidget_VisualizationSettings_YaxisTypeFromJSON(object.type) + : 0; + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.min = + object.min !== undefined && object.min !== null ? String(object.min) : ""; + message.max = + object.max !== undefined && object.max !== null ? String(object.max) : ""; + message.unitFormat = + object.unitFormat !== undefined && object.unitFormat !== null + ? unitFormatFromJSON(object.unitFormat) + : 0; + message.precision = + object.precision !== undefined && object.precision !== null + ? 
Number(object.precision) + : undefined; + return message; + }, + + toJSON(message: ChartWidget_VisualizationSettings_Yaxis): unknown { + const obj: any = {}; + message.type !== undefined && + (obj.type = chartWidget_VisualizationSettings_YaxisTypeToJSON( + message.type + )); + message.title !== undefined && (obj.title = message.title); + message.min !== undefined && (obj.min = message.min); + message.max !== undefined && (obj.max = message.max); + message.unitFormat !== undefined && + (obj.unitFormat = unitFormatToJSON(message.unitFormat)); + message.precision !== undefined && (obj.precision = message.precision); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): ChartWidget_VisualizationSettings_Yaxis { + const message = { + ...baseChartWidget_VisualizationSettings_Yaxis, + } as ChartWidget_VisualizationSettings_Yaxis; + message.type = object.type ?? 0; + message.title = object.title ?? ""; + message.min = object.min ?? ""; + message.max = object.max ?? ""; + message.unitFormat = object.unitFormat ?? 0; + message.precision = object.precision ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_Yaxis.$type, + ChartWidget_VisualizationSettings_Yaxis +); + +const baseChartWidget_VisualizationSettings_YaxisSettings: object = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.YaxisSettings", +}; + +export const ChartWidget_VisualizationSettings_YaxisSettings = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.VisualizationSettings.YaxisSettings" as const, + + encode( + message: ChartWidget_VisualizationSettings_YaxisSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.left !== undefined) { + ChartWidget_VisualizationSettings_Yaxis.encode( + message.left, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.right !== undefined) { + ChartWidget_VisualizationSettings_Yaxis.encode( + message.right, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_VisualizationSettings_YaxisSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_VisualizationSettings_YaxisSettings, + } as ChartWidget_VisualizationSettings_YaxisSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.left = ChartWidget_VisualizationSettings_Yaxis.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.right = ChartWidget_VisualizationSettings_Yaxis.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_VisualizationSettings_YaxisSettings { + const message = { + ...baseChartWidget_VisualizationSettings_YaxisSettings, + } as ChartWidget_VisualizationSettings_YaxisSettings; + message.left = + object.left !== undefined && object.left !== null + ? ChartWidget_VisualizationSettings_Yaxis.fromJSON(object.left) + : undefined; + message.right = + object.right !== undefined && object.right !== null + ? ChartWidget_VisualizationSettings_Yaxis.fromJSON(object.right) + : undefined; + return message; + }, + + toJSON(message: ChartWidget_VisualizationSettings_YaxisSettings): unknown { + const obj: any = {}; + message.left !== undefined && + (obj.left = message.left + ? 
ChartWidget_VisualizationSettings_Yaxis.toJSON(message.left) + : undefined); + message.right !== undefined && + (obj.right = message.right + ? ChartWidget_VisualizationSettings_Yaxis.toJSON(message.right) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ChartWidget_VisualizationSettings_YaxisSettings { + const message = { + ...baseChartWidget_VisualizationSettings_YaxisSettings, + } as ChartWidget_VisualizationSettings_YaxisSettings; + message.left = + object.left !== undefined && object.left !== null + ? ChartWidget_VisualizationSettings_Yaxis.fromPartial(object.left) + : undefined; + message.right = + object.right !== undefined && object.right !== null + ? ChartWidget_VisualizationSettings_Yaxis.fromPartial(object.right) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_VisualizationSettings_YaxisSettings.$type, + ChartWidget_VisualizationSettings_YaxisSettings +); + +const baseChartWidget_SeriesOverrides: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.SeriesOverrides", +}; + +export const ChartWidget_SeriesOverrides = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.SeriesOverrides" as const, + + encode( + message: ChartWidget_SeriesOverrides, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== undefined) { + writer.uint32(10).string(message.name); + } + if (message.targetIndex !== undefined) { + writer.uint32(18).string(message.targetIndex); + } + if (message.settings !== undefined) { + ChartWidget_SeriesOverrides_SeriesOverrideSettings.encode( + message.settings, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_SeriesOverrides { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_SeriesOverrides, + } as ChartWidget_SeriesOverrides; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.targetIndex = reader.string(); + break; + case 3: + message.settings = + ChartWidget_SeriesOverrides_SeriesOverrideSettings.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_SeriesOverrides { + const message = { + ...baseChartWidget_SeriesOverrides, + } as ChartWidget_SeriesOverrides; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : undefined; + message.targetIndex = + object.targetIndex !== undefined && object.targetIndex !== null + ? String(object.targetIndex) + : undefined; + message.settings = + object.settings !== undefined && object.settings !== null + ? ChartWidget_SeriesOverrides_SeriesOverrideSettings.fromJSON( + object.settings + ) + : undefined; + return message; + }, + + toJSON(message: ChartWidget_SeriesOverrides): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.targetIndex !== undefined && + (obj.targetIndex = message.targetIndex); + message.settings !== undefined && + (obj.settings = message.settings + ? 
ChartWidget_SeriesOverrides_SeriesOverrideSettings.toJSON( + message.settings + ) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ChartWidget_SeriesOverrides { + const message = { + ...baseChartWidget_SeriesOverrides, + } as ChartWidget_SeriesOverrides; + message.name = object.name ?? undefined; + message.targetIndex = object.targetIndex ?? undefined; + message.settings = + object.settings !== undefined && object.settings !== null + ? ChartWidget_SeriesOverrides_SeriesOverrideSettings.fromPartial( + object.settings + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_SeriesOverrides.$type, + ChartWidget_SeriesOverrides +); + +const baseChartWidget_SeriesOverrides_SeriesOverrideSettings: object = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.SeriesOverrides.SeriesOverrideSettings", + name: "", + color: "", + type: 0, + stackName: "", + growDown: false, + yaxisPosition: 0, +}; + +export const ChartWidget_SeriesOverrides_SeriesOverrideSettings = { + $type: + "yandex.cloud.monitoring.v3.ChartWidget.SeriesOverrides.SeriesOverrideSettings" as const, + + encode( + message: ChartWidget_SeriesOverrides_SeriesOverrideSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.color !== "") { + writer.uint32(18).string(message.color); + } + if (message.type !== 0) { + writer.uint32(24).int32(message.type); + } + if (message.stackName !== "") { + writer.uint32(34).string(message.stackName); + } + if (message.growDown === true) { + writer.uint32(40).bool(message.growDown); + } + if (message.yaxisPosition !== 0) { + writer.uint32(48).int32(message.yaxisPosition); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_SeriesOverrides_SeriesOverrideSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_SeriesOverrides_SeriesOverrideSettings, + } as ChartWidget_SeriesOverrides_SeriesOverrideSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.color = reader.string(); + break; + case 3: + message.type = reader.int32() as any; + break; + case 4: + message.stackName = reader.string(); + break; + case 5: + message.growDown = reader.bool(); + break; + case 6: + message.yaxisPosition = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_SeriesOverrides_SeriesOverrideSettings { + const message = { + ...baseChartWidget_SeriesOverrides_SeriesOverrideSettings, + } as ChartWidget_SeriesOverrides_SeriesOverrideSettings; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.color = + object.color !== undefined && object.color !== null + ? String(object.color) + : ""; + message.type = + object.type !== undefined && object.type !== null + ? chartWidget_SeriesOverrides_SeriesVisualizationTypeFromJSON( + object.type + ) + : 0; + message.stackName = + object.stackName !== undefined && object.stackName !== null + ? String(object.stackName) + : ""; + message.growDown = + object.growDown !== undefined && object.growDown !== null + ? 
Boolean(object.growDown) + : false; + message.yaxisPosition = + object.yaxisPosition !== undefined && object.yaxisPosition !== null + ? chartWidget_SeriesOverrides_YaxisPositionFromJSON( + object.yaxisPosition + ) + : 0; + return message; + }, + + toJSON(message: ChartWidget_SeriesOverrides_SeriesOverrideSettings): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.color !== undefined && (obj.color = message.color); + message.type !== undefined && + (obj.type = chartWidget_SeriesOverrides_SeriesVisualizationTypeToJSON( + message.type + )); + message.stackName !== undefined && (obj.stackName = message.stackName); + message.growDown !== undefined && (obj.growDown = message.growDown); + message.yaxisPosition !== undefined && + (obj.yaxisPosition = chartWidget_SeriesOverrides_YaxisPositionToJSON( + message.yaxisPosition + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): ChartWidget_SeriesOverrides_SeriesOverrideSettings { + const message = { + ...baseChartWidget_SeriesOverrides_SeriesOverrideSettings, + } as ChartWidget_SeriesOverrides_SeriesOverrideSettings; + message.name = object.name ?? ""; + message.color = object.color ?? ""; + message.type = object.type ?? 0; + message.stackName = object.stackName ?? ""; + message.growDown = object.growDown ?? false; + message.yaxisPosition = object.yaxisPosition ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_SeriesOverrides_SeriesOverrideSettings.$type, + ChartWidget_SeriesOverrides_SeriesOverrideSettings +); + +const baseChartWidget_NameHidingSettings: object = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.NameHidingSettings", + positive: false, + names: "", +}; + +export const ChartWidget_NameHidingSettings = { + $type: "yandex.cloud.monitoring.v3.ChartWidget.NameHidingSettings" as const, + + encode( + message: ChartWidget_NameHidingSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.positive === true) { + writer.uint32(8).bool(message.positive); + } + for (const v of message.names) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ChartWidget_NameHidingSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseChartWidget_NameHidingSettings, + } as ChartWidget_NameHidingSettings; + message.names = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.positive = reader.bool(); + break; + case 2: + message.names.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ChartWidget_NameHidingSettings { + const message = { + ...baseChartWidget_NameHidingSettings, + } as ChartWidget_NameHidingSettings; + message.positive = + object.positive !== undefined && object.positive !== null + ? Boolean(object.positive) + : false; + message.names = (object.names ?? 
[]).map((e: any) => String(e)); + return message; + }, + + toJSON(message: ChartWidget_NameHidingSettings): unknown { + const obj: any = {}; + message.positive !== undefined && (obj.positive = message.positive); + if (message.names) { + obj.names = message.names.map((e) => e); + } else { + obj.names = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): ChartWidget_NameHidingSettings { + const message = { + ...baseChartWidget_NameHidingSettings, + } as ChartWidget_NameHidingSettings; + message.positive = object.positive ?? false; + message.names = object.names?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ChartWidget_NameHidingSettings.$type, + ChartWidget_NameHidingSettings +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/dashboard.ts b/src/generated/yandex/cloud/monitoring/v3/dashboard.ts new file mode 100644 index 00000000..8a95f8e8 --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/dashboard.ts @@ -0,0 +1,442 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Parametrization } from "../../../../yandex/cloud/monitoring/v3/parametrization"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; +import { Widget } from "../../../../yandex/cloud/monitoring/v3/widget"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** Dashboard resource. */ +export interface Dashboard { + $type: "yandex.cloud.monitoring.v3.Dashboard"; + /** Dashboard ID. */ + id: string; + /** Folder ID. */ + folderId: string | undefined; + /** Creation timestamp. */ + createdAt?: Date; + /** Modification timestamp. */ + modifiedAt?: Date; + /** ID of the user who created the dashboard. */ + createdBy: string; + /** ID of the user who modified the dashboard. */ + modifiedBy: string; + /** Dashboard name. */ + name: string; + /** Dashboard description. */ + description: string; + /** Resource labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** Dashboard title. */ + title: string; + /** List of dashboard widgets. */ + widgets: Widget[]; + /** Dashboard parametrization. */ + parametrization?: Parametrization; + /** Dashboard etag. 
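+   *
+   * (Presumably an opaque revision marker: UpdateDashboardRequest and
+   * DeleteDashboardRequest in dashboard_service.ts take "the current etag
+   * of the dashboard", which suggests this value is used for
+   * optimistic-concurrency checks on update/delete.)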
*/ + etag: string; +} + +export interface Dashboard_LabelsEntry { + $type: "yandex.cloud.monitoring.v3.Dashboard.LabelsEntry"; + key: string; + value: string; +} + +const baseDashboard: object = { + $type: "yandex.cloud.monitoring.v3.Dashboard", + id: "", + createdBy: "", + modifiedBy: "", + name: "", + description: "", + title: "", + etag: "", +}; + +export const Dashboard = { + $type: "yandex.cloud.monitoring.v3.Dashboard" as const, + + encode( + message: Dashboard, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== undefined) { + writer.uint32(26).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(162).fork() + ).ldelim(); + } + if (message.modifiedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.modifiedAt), + writer.uint32(170).fork() + ).ldelim(); + } + if (message.createdBy !== "") { + writer.uint32(178).string(message.createdBy); + } + if (message.modifiedBy !== "") { + writer.uint32(186).string(message.modifiedBy); + } + if (message.name !== "") { + writer.uint32(194).string(message.name); + } + if (message.description !== "") { + writer.uint32(202).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + Dashboard_LabelsEntry.encode( + { + $type: "yandex.cloud.monitoring.v3.Dashboard.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(210).fork() + ).ldelim(); + }); + if (message.title !== "") { + writer.uint32(218).string(message.title); + } + for (const v of message.widgets) { + Widget.encode(v!, writer.uint32(226).fork()).ldelim(); + } + if (message.parametrization !== undefined) { + Parametrization.encode( + message.parametrization, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.etag !== "") { + writer.uint32(242).string(message.etag); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Dashboard { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDashboard } as Dashboard; + message.labels = {}; + message.widgets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 3: + message.folderId = reader.string(); + break; + case 20: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 21: + message.modifiedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 22: + message.createdBy = reader.string(); + break; + case 23: + message.modifiedBy = reader.string(); + break; + case 24: + message.name = reader.string(); + break; + case 25: + message.description = reader.string(); + break; + case 26: + const entry26 = Dashboard_LabelsEntry.decode(reader, reader.uint32()); + if (entry26.value !== undefined) { + message.labels[entry26.key] = entry26.value; + } + break; + case 27: + message.title = reader.string(); + break; + case 28: + message.widgets.push(Widget.decode(reader, reader.uint32())); + break; + case 29: + message.parametrization = Parametrization.decode( + reader, + reader.uint32() + ); + break; + case 30: + message.etag = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Dashboard { + const message = { ...baseDashboard } as Dashboard; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.modifiedAt = + object.modifiedAt !== undefined && object.modifiedAt !== null + ? fromJsonTimestamp(object.modifiedAt) + : undefined; + message.createdBy = + object.createdBy !== undefined && object.createdBy !== null + ? String(object.createdBy) + : ""; + message.modifiedBy = + object.modifiedBy !== undefined && object.modifiedBy !== null + ? String(object.modifiedBy) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.widgets = (object.widgets ?? []).map((e: any) => + Widget.fromJSON(e) + ); + message.parametrization = + object.parametrization !== undefined && object.parametrization !== null + ? Parametrization.fromJSON(object.parametrization) + : undefined; + message.etag = + object.etag !== undefined && object.etag !== null + ? 
String(object.etag) + : ""; + return message; + }, + + toJSON(message: Dashboard): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.modifiedAt !== undefined && + (obj.modifiedAt = message.modifiedAt.toISOString()); + message.createdBy !== undefined && (obj.createdBy = message.createdBy); + message.modifiedBy !== undefined && (obj.modifiedBy = message.modifiedBy); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.title !== undefined && (obj.title = message.title); + if (message.widgets) { + obj.widgets = message.widgets.map((e) => + e ? Widget.toJSON(e) : undefined + ); + } else { + obj.widgets = []; + } + message.parametrization !== undefined && + (obj.parametrization = message.parametrization + ? Parametrization.toJSON(message.parametrization) + : undefined); + message.etag !== undefined && (obj.etag = message.etag); + return obj; + }, + + fromPartial, I>>( + object: I + ): Dashboard { + const message = { ...baseDashboard } as Dashboard; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? undefined; + message.createdAt = object.createdAt ?? undefined; + message.modifiedAt = object.modifiedAt ?? undefined; + message.createdBy = object.createdBy ?? ""; + message.modifiedBy = object.modifiedBy ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.title = object.title ?? ""; + message.widgets = object.widgets?.map((e) => Widget.fromPartial(e)) || []; + message.parametrization = + object.parametrization !== undefined && object.parametrization !== null + ? Parametrization.fromPartial(object.parametrization) + : undefined; + message.etag = object.etag ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Dashboard.$type, Dashboard); + +const baseDashboard_LabelsEntry: object = { + $type: "yandex.cloud.monitoring.v3.Dashboard.LabelsEntry", + key: "", + value: "", +}; + +export const Dashboard_LabelsEntry = { + $type: "yandex.cloud.monitoring.v3.Dashboard.LabelsEntry" as const, + + encode( + message: Dashboard_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Dashboard_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseDashboard_LabelsEntry } as Dashboard_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Dashboard_LabelsEntry { + const message = { ...baseDashboard_LabelsEntry } as Dashboard_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: Dashboard_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): Dashboard_LabelsEntry { + const message = { ...baseDashboard_LabelsEntry } as Dashboard_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Dashboard_LabelsEntry.$type, Dashboard_LabelsEntry); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/dashboard_service.ts b/src/generated/yandex/cloud/monitoring/v3/dashboard_service.ts new file mode 100644 index 00000000..3e1031bc --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/dashboard_service.ts @@ -0,0 +1,1752 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Parametrization } from "../../../../yandex/cloud/monitoring/v3/parametrization"; +import { Dashboard } from "../../../../yandex/cloud/monitoring/v3/dashboard"; +import { Widget } from "../../../../yandex/cloud/monitoring/v3/widget"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +export interface GetDashboardRequest { + $type: "yandex.cloud.monitoring.v3.GetDashboardRequest"; + /** 
Required. Dashboard ID. */ + dashboardId: string; +} + +export interface ListDashboardsRequest { + $type: "yandex.cloud.monitoring.v3.ListDashboardsRequest"; + /** Required. Folder ID. */ + folderId: string | undefined; + /** + * The maximum number of dashboards to return. + * If unspecified, at most 100 dashboards will be returned. + * The maximum value is 1000; values above 1000 will be coerced to 1000. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set `page_token` to the + * [ListDashboardResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * The expression must specify: + * 1. The field name. Currently you can use filtering only on the [Dashboard.name] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + * Example: name="abc" + */ + filter: string; +} + +export interface ListDashboardsResponse { + $type: "yandex.cloud.monitoring.v3.ListDashboardsResponse"; + /** List of dashboards. */ + dashboards: Dashboard[]; + /** Token to retrieve the next page of results, or empty if there are no more results in the list. */ + nextPageToken: string; +} + +export interface CreateDashboardRequest { + $type: "yandex.cloud.monitoring.v3.CreateDashboardRequest"; + /** Required. Folder ID. */ + folderId: string | undefined; + /** Required. Dashboard name. */ + name: string; + /** Dashboard description. */ + description: string; + /** Resource labels as `key:value` pairs. */ + labels: { [key: string]: string }; + /** Dashboard title. */ + title: string; + /** List of dashboard widgets. */ + widgets: Widget[]; + /** Dashboard parametrization. */ + parametrization?: Parametrization; +} + +export interface CreateDashboardRequest_LabelsEntry { + $type: "yandex.cloud.monitoring.v3.CreateDashboardRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface CreateDashboardMetadata { + $type: "yandex.cloud.monitoring.v3.CreateDashboardMetadata"; + /** Dashboard ID. */ + dashboardId: string; +} + +export interface UpdateDashboardRequest { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardRequest"; + /** Required. Dashboard ID. */ + dashboardId: string; + /** Required. Dashboard name. */ + name: string; + /** Dashboard description. */ + description: string; + /** + * Resource labels as `key:value` pairs. + * + * Existing set of `labels` is completely replaced by the provided set. + */ + labels: { [key: string]: string }; + /** Dashboard title. */ + title: string; + /** List of dashboard widgets. */ + widgets: Widget[]; + /** Dashboard parametrization. */ + parametrization?: Parametrization; + /** The current etag of the dashboard. */ + etag: string; +} + +export interface UpdateDashboardRequest_LabelsEntry { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface UpdateDashboardMetadata { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardMetadata"; + /** Dashboard ID. */ + dashboardId: string; +} + +export interface DeleteDashboardRequest { + $type: "yandex.cloud.monitoring.v3.DeleteDashboardRequest"; + /** Required. Dashboard ID. */ + dashboardId: string; + /** The current etag of the dashboard. */ + etag: string; +} + +export interface DeleteDashboardMetadata { + $type: "yandex.cloud.monitoring.v3.DeleteDashboardMetadata"; + /** Dashboard ID. 
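+   *
+   * (Like CreateDashboardMetadata and UpdateDashboardMetadata above, this
+   * message presumably appears in the metadata field of the Operation
+   * returned by the corresponding DashboardService call.)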
*/ + dashboardId: string; +} + +export interface ListDashboardOperationsRequest { + $type: "yandex.cloud.monitoring.v3.ListDashboardOperationsRequest"; + /** ID of the dashboard to list operations for. */ + dashboardId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListDashboardOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the + * [ListDashboardOperationsResponse.next_page_token] returned by a previous list request. + */ + pageToken: string; +} + +export interface ListDashboardOperationsResponse { + $type: "yandex.cloud.monitoring.v3.ListDashboardOperationsResponse"; + /** List of operations for the specified dashboard. */ + operations: Operation[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListDashboardOperationsRequest.page_size], use the [next_page_token] as the value + * for the [ListDashboardOperationsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetDashboardRequest: object = { + $type: "yandex.cloud.monitoring.v3.GetDashboardRequest", + dashboardId: "", +}; + +export const GetDashboardRequest = { + $type: "yandex.cloud.monitoring.v3.GetDashboardRequest" as const, + + encode( + message: GetDashboardRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetDashboardRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetDashboardRequest } as GetDashboardRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetDashboardRequest { + const message = { ...baseGetDashboardRequest } as GetDashboardRequest; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? String(object.dashboardId) + : ""; + return message; + }, + + toJSON(message: GetDashboardRequest): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetDashboardRequest { + const message = { ...baseGetDashboardRequest } as GetDashboardRequest; + message.dashboardId = object.dashboardId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetDashboardRequest.$type, GetDashboardRequest); + +const baseListDashboardsRequest: object = { + $type: "yandex.cloud.monitoring.v3.ListDashboardsRequest", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListDashboardsRequest = { + $type: "yandex.cloud.monitoring.v3.ListDashboardsRequest" as const, + + encode( + message: ListDashboardsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== undefined) { + writer.uint32(18).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(152).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(162).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(170).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDashboardsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListDashboardsRequest } as ListDashboardsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.folderId = reader.string(); + break; + case 19: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 20: + message.pageToken = reader.string(); + break; + case 21: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDashboardsRequest { + const message = { ...baseListDashboardsRequest } as ListDashboardsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListDashboardsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDashboardsRequest { + const message = { ...baseListDashboardsRequest } as ListDashboardsRequest; + message.folderId = object.folderId ?? undefined; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListDashboardsRequest.$type, ListDashboardsRequest); + +const baseListDashboardsResponse: object = { + $type: "yandex.cloud.monitoring.v3.ListDashboardsResponse", + nextPageToken: "", +}; + +export const ListDashboardsResponse = { + $type: "yandex.cloud.monitoring.v3.ListDashboardsResponse" as const, + + encode( + message: ListDashboardsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.dashboards) { + Dashboard.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDashboardsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListDashboardsResponse } as ListDashboardsResponse; + message.dashboards = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboards.push(Dashboard.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDashboardsResponse { + const message = { ...baseListDashboardsResponse } as ListDashboardsResponse; + message.dashboards = (object.dashboards ?? []).map((e: any) => + Dashboard.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListDashboardsResponse): unknown { + const obj: any = {}; + if (message.dashboards) { + obj.dashboards = message.dashboards.map((e) => + e ? Dashboard.toJSON(e) : undefined + ); + } else { + obj.dashboards = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDashboardsResponse { + const message = { ...baseListDashboardsResponse } as ListDashboardsResponse; + message.dashboards = + object.dashboards?.map((e) => Dashboard.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListDashboardsResponse.$type, ListDashboardsResponse); + +const baseCreateDashboardRequest: object = { + $type: "yandex.cloud.monitoring.v3.CreateDashboardRequest", + name: "", + description: "", + title: "", +}; + +export const CreateDashboardRequest = { + $type: "yandex.cloud.monitoring.v3.CreateDashboardRequest" as const, + + encode( + message: CreateDashboardRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== undefined) { + writer.uint32(18).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(154).string(message.name); + } + if (message.description !== "") { + writer.uint32(162).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + CreateDashboardRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.monitoring.v3.CreateDashboardRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(170).fork() + ).ldelim(); + }); + if (message.title !== "") { + writer.uint32(178).string(message.title); + } + for (const v of message.widgets) { + Widget.encode(v!, writer.uint32(186).fork()).ldelim(); + } + if (message.parametrization !== undefined) { + Parametrization.encode( + message.parametrization, + writer.uint32(194).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateDashboardRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateDashboardRequest } as CreateDashboardRequest; + message.labels = {}; + message.widgets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.folderId = reader.string(); + break; + case 19: + message.name = reader.string(); + break; + case 20: + message.description = reader.string(); + break; + case 21: + const entry21 = CreateDashboardRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry21.value !== undefined) { + message.labels[entry21.key] = entry21.value; + } + break; + case 22: + message.title = reader.string(); + break; + case 23: + message.widgets.push(Widget.decode(reader, reader.uint32())); + break; + case 24: + message.parametrization = Parametrization.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateDashboardRequest { + const message = { ...baseCreateDashboardRequest } as CreateDashboardRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.widgets = (object.widgets ?? []).map((e: any) => + Widget.fromJSON(e) + ); + message.parametrization = + object.parametrization !== undefined && object.parametrization !== null + ? 
Parametrization.fromJSON(object.parametrization) + : undefined; + return message; + }, + + toJSON(message: CreateDashboardRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.title !== undefined && (obj.title = message.title); + if (message.widgets) { + obj.widgets = message.widgets.map((e) => + e ? Widget.toJSON(e) : undefined + ); + } else { + obj.widgets = []; + } + message.parametrization !== undefined && + (obj.parametrization = message.parametrization + ? Parametrization.toJSON(message.parametrization) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateDashboardRequest { + const message = { ...baseCreateDashboardRequest } as CreateDashboardRequest; + message.folderId = object.folderId ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.title = object.title ?? ""; + message.widgets = object.widgets?.map((e) => Widget.fromPartial(e)) || []; + message.parametrization = + object.parametrization !== undefined && object.parametrization !== null + ? Parametrization.fromPartial(object.parametrization) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(CreateDashboardRequest.$type, CreateDashboardRequest); + +const baseCreateDashboardRequest_LabelsEntry: object = { + $type: "yandex.cloud.monitoring.v3.CreateDashboardRequest.LabelsEntry", + key: "", + value: "", +}; + +export const CreateDashboardRequest_LabelsEntry = { + $type: + "yandex.cloud.monitoring.v3.CreateDashboardRequest.LabelsEntry" as const, + + encode( + message: CreateDashboardRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateDashboardRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateDashboardRequest_LabelsEntry, + } as CreateDashboardRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateDashboardRequest_LabelsEntry { + const message = { + ...baseCreateDashboardRequest_LabelsEntry, + } as CreateDashboardRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? 
String(object.value) + : ""; + return message; + }, + + toJSON(message: CreateDashboardRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): CreateDashboardRequest_LabelsEntry { + const message = { + ...baseCreateDashboardRequest_LabelsEntry, + } as CreateDashboardRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + CreateDashboardRequest_LabelsEntry.$type, + CreateDashboardRequest_LabelsEntry +); + +const baseCreateDashboardMetadata: object = { + $type: "yandex.cloud.monitoring.v3.CreateDashboardMetadata", + dashboardId: "", +}; + +export const CreateDashboardMetadata = { + $type: "yandex.cloud.monitoring.v3.CreateDashboardMetadata" as const, + + encode( + message: CreateDashboardMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateDashboardMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseCreateDashboardMetadata, + } as CreateDashboardMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateDashboardMetadata { + const message = { + ...baseCreateDashboardMetadata, + } as CreateDashboardMetadata; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? String(object.dashboardId) + : ""; + return message; + }, + + toJSON(message: CreateDashboardMetadata): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateDashboardMetadata { + const message = { + ...baseCreateDashboardMetadata, + } as CreateDashboardMetadata; + message.dashboardId = object.dashboardId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(CreateDashboardMetadata.$type, CreateDashboardMetadata); + +const baseUpdateDashboardRequest: object = { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardRequest", + dashboardId: "", + name: "", + description: "", + title: "", + etag: "", +}; + +export const UpdateDashboardRequest = { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardRequest" as const, + + encode( + message: UpdateDashboardRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + UpdateDashboardRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.monitoring.v3.UpdateDashboardRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.title !== "") { + writer.uint32(42).string(message.title); + } + for (const v of message.widgets) { + Widget.encode(v!, writer.uint32(50).fork()).ldelim(); + } + if (message.parametrization !== undefined) { + Parametrization.encode( + message.parametrization, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.etag !== "") { + writer.uint32(66).string(message.etag); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateDashboardRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateDashboardRequest } as UpdateDashboardRequest; + message.labels = {}; + message.widgets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = UpdateDashboardRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.title = reader.string(); + break; + case 6: + message.widgets.push(Widget.decode(reader, reader.uint32())); + break; + case 7: + message.parametrization = Parametrization.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.etag = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateDashboardRequest { + const message = { ...baseUpdateDashboardRequest } as UpdateDashboardRequest; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? String(object.dashboardId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.widgets = (object.widgets ?? 
[]).map((e: any) => + Widget.fromJSON(e) + ); + message.parametrization = + object.parametrization !== undefined && object.parametrization !== null + ? Parametrization.fromJSON(object.parametrization) + : undefined; + message.etag = + object.etag !== undefined && object.etag !== null + ? String(object.etag) + : ""; + return message; + }, + + toJSON(message: UpdateDashboardRequest): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.title !== undefined && (obj.title = message.title); + if (message.widgets) { + obj.widgets = message.widgets.map((e) => + e ? Widget.toJSON(e) : undefined + ); + } else { + obj.widgets = []; + } + message.parametrization !== undefined && + (obj.parametrization = message.parametrization + ? Parametrization.toJSON(message.parametrization) + : undefined); + message.etag !== undefined && (obj.etag = message.etag); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateDashboardRequest { + const message = { ...baseUpdateDashboardRequest } as UpdateDashboardRequest; + message.dashboardId = object.dashboardId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.title = object.title ?? ""; + message.widgets = object.widgets?.map((e) => Widget.fromPartial(e)) || []; + message.parametrization = + object.parametrization !== undefined && object.parametrization !== null + ? Parametrization.fromPartial(object.parametrization) + : undefined; + message.etag = object.etag ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateDashboardRequest.$type, UpdateDashboardRequest); + +const baseUpdateDashboardRequest_LabelsEntry: object = { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardRequest.LabelsEntry", + key: "", + value: "", +}; + +export const UpdateDashboardRequest_LabelsEntry = { + $type: + "yandex.cloud.monitoring.v3.UpdateDashboardRequest.LabelsEntry" as const, + + encode( + message: UpdateDashboardRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateDashboardRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateDashboardRequest_LabelsEntry, + } as UpdateDashboardRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateDashboardRequest_LabelsEntry { + const message = { + ...baseUpdateDashboardRequest_LabelsEntry, + } as UpdateDashboardRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: UpdateDashboardRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): UpdateDashboardRequest_LabelsEntry { + const message = { + ...baseUpdateDashboardRequest_LabelsEntry, + } as UpdateDashboardRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateDashboardRequest_LabelsEntry.$type, + UpdateDashboardRequest_LabelsEntry +); + +const baseUpdateDashboardMetadata: object = { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardMetadata", + dashboardId: "", +}; + +export const UpdateDashboardMetadata = { + $type: "yandex.cloud.monitoring.v3.UpdateDashboardMetadata" as const, + + encode( + message: UpdateDashboardMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateDashboardMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateDashboardMetadata, + } as UpdateDashboardMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateDashboardMetadata { + const message = { + ...baseUpdateDashboardMetadata, + } as UpdateDashboardMetadata; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? String(object.dashboardId) + : ""; + return message; + }, + + toJSON(message: UpdateDashboardMetadata): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateDashboardMetadata { + const message = { + ...baseUpdateDashboardMetadata, + } as UpdateDashboardMetadata; + message.dashboardId = object.dashboardId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateDashboardMetadata.$type, UpdateDashboardMetadata); + +const baseDeleteDashboardRequest: object = { + $type: "yandex.cloud.monitoring.v3.DeleteDashboardRequest", + dashboardId: "", + etag: "", +}; + +export const DeleteDashboardRequest = { + $type: "yandex.cloud.monitoring.v3.DeleteDashboardRequest" as const, + + encode( + message: DeleteDashboardRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + if (message.etag !== "") { + writer.uint32(18).string(message.etag); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteDashboardRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteDashboardRequest } as DeleteDashboardRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + case 2: + message.etag = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteDashboardRequest { + const message = { ...baseDeleteDashboardRequest } as DeleteDashboardRequest; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? String(object.dashboardId) + : ""; + message.etag = + object.etag !== undefined && object.etag !== null + ? String(object.etag) + : ""; + return message; + }, + + toJSON(message: DeleteDashboardRequest): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + message.etag !== undefined && (obj.etag = message.etag); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteDashboardRequest { + const message = { ...baseDeleteDashboardRequest } as DeleteDashboardRequest; + message.dashboardId = object.dashboardId ?? ""; + message.etag = object.etag ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteDashboardRequest.$type, DeleteDashboardRequest); + +const baseDeleteDashboardMetadata: object = { + $type: "yandex.cloud.monitoring.v3.DeleteDashboardMetadata", + dashboardId: "", +}; + +export const DeleteDashboardMetadata = { + $type: "yandex.cloud.monitoring.v3.DeleteDashboardMetadata" as const, + + encode( + message: DeleteDashboardMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteDashboardMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeleteDashboardMetadata, + } as DeleteDashboardMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteDashboardMetadata { + const message = { + ...baseDeleteDashboardMetadata, + } as DeleteDashboardMetadata; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? 
String(object.dashboardId) + : ""; + return message; + }, + + toJSON(message: DeleteDashboardMetadata): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteDashboardMetadata { + const message = { + ...baseDeleteDashboardMetadata, + } as DeleteDashboardMetadata; + message.dashboardId = object.dashboardId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteDashboardMetadata.$type, DeleteDashboardMetadata); + +const baseListDashboardOperationsRequest: object = { + $type: "yandex.cloud.monitoring.v3.ListDashboardOperationsRequest", + dashboardId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListDashboardOperationsRequest = { + $type: "yandex.cloud.monitoring.v3.ListDashboardOperationsRequest" as const, + + encode( + message: ListDashboardOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dashboardId !== "") { + writer.uint32(10).string(message.dashboardId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDashboardOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListDashboardOperationsRequest, + } as ListDashboardOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dashboardId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDashboardOperationsRequest { + const message = { + ...baseListDashboardOperationsRequest, + } as ListDashboardOperationsRequest; + message.dashboardId = + object.dashboardId !== undefined && object.dashboardId !== null + ? String(object.dashboardId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListDashboardOperationsRequest): unknown { + const obj: any = {}; + message.dashboardId !== undefined && + (obj.dashboardId = message.dashboardId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDashboardOperationsRequest { + const message = { + ...baseListDashboardOperationsRequest, + } as ListDashboardOperationsRequest; + message.dashboardId = object.dashboardId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListDashboardOperationsRequest.$type, + ListDashboardOperationsRequest +); + +const baseListDashboardOperationsResponse: object = { + $type: "yandex.cloud.monitoring.v3.ListDashboardOperationsResponse", + nextPageToken: "", +}; + +export const ListDashboardOperationsResponse = { + $type: "yandex.cloud.monitoring.v3.ListDashboardOperationsResponse" as const, + + encode( + message: ListDashboardOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListDashboardOperationsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListDashboardOperationsResponse, + } as ListDashboardOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListDashboardOperationsResponse { + const message = { + ...baseListDashboardOperationsResponse, + } as ListDashboardOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListDashboardOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListDashboardOperationsResponse { + const message = { + ...baseListDashboardOperationsResponse, + } as ListDashboardOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListDashboardOperationsResponse.$type, + ListDashboardOperationsResponse +); + +/** A set of methods for managing dashboards. */ +export const DashboardServiceService = { + /** Returns the specified dashboard. */ + get: { + path: "/yandex.cloud.monitoring.v3.DashboardService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetDashboardRequest) => + Buffer.from(GetDashboardRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetDashboardRequest.decode(value), + responseSerialize: (value: Dashboard) => + Buffer.from(Dashboard.encode(value).finish()), + responseDeserialize: (value: Buffer) => Dashboard.decode(value), + }, + /** Retrieves the list of dashboards in the specified folder. 
*/ + list: { + path: "/yandex.cloud.monitoring.v3.DashboardService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListDashboardsRequest) => + Buffer.from(ListDashboardsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListDashboardsRequest.decode(value), + responseSerialize: (value: ListDashboardsResponse) => + Buffer.from(ListDashboardsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListDashboardsResponse.decode(value), + }, + /** Creates a new dashboard in the specified folder. */ + create: { + path: "/yandex.cloud.monitoring.v3.DashboardService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateDashboardRequest) => + Buffer.from(CreateDashboardRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateDashboardRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified dashboard. */ + update: { + path: "/yandex.cloud.monitoring.v3.DashboardService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateDashboardRequest) => + Buffer.from(UpdateDashboardRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateDashboardRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified dashboard. */ + delete: { + path: "/yandex.cloud.monitoring.v3.DashboardService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteDashboardRequest) => + Buffer.from(DeleteDashboardRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteDashboardRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified dashboard. */ + listOperations: { + path: "/yandex.cloud.monitoring.v3.DashboardService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListDashboardOperationsRequest) => + Buffer.from(ListDashboardOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListDashboardOperationsRequest.decode(value), + responseSerialize: (value: ListDashboardOperationsResponse) => + Buffer.from(ListDashboardOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListDashboardOperationsResponse.decode(value), + }, +} as const; + +export interface DashboardServiceServer extends UntypedServiceImplementation { + /** Returns the specified dashboard. */ + get: handleUnaryCall; + /** Retrieves the list of dashboards in the specified folder. */ + list: handleUnaryCall; + /** Creates a new dashboard in the specified folder. */ + create: handleUnaryCall; + /** Updates the specified dashboard. */ + update: handleUnaryCall; + /** Deletes the specified dashboard. */ + delete: handleUnaryCall; + /** Lists operations for the specified dashboard. */ + listOperations: handleUnaryCall< + ListDashboardOperationsRequest, + ListDashboardOperationsResponse + >; +} + +export interface DashboardServiceClient extends Client { + /** Returns the specified dashboard. 
*/ + get( + request: GetDashboardRequest, + callback: (error: ServiceError | null, response: Dashboard) => void + ): ClientUnaryCall; + get( + request: GetDashboardRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Dashboard) => void + ): ClientUnaryCall; + get( + request: GetDashboardRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Dashboard) => void + ): ClientUnaryCall; + /** Retrieves the list of dashboards in the specified folder. */ + list( + request: ListDashboardsRequest, + callback: ( + error: ServiceError | null, + response: ListDashboardsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListDashboardsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListDashboardsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListDashboardsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListDashboardsResponse + ) => void + ): ClientUnaryCall; + /** Creates a new dashboard in the specified folder. */ + create( + request: CreateDashboardRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateDashboardRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateDashboardRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified dashboard. */ + update( + request: UpdateDashboardRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateDashboardRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateDashboardRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified dashboard. */ + delete( + request: DeleteDashboardRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteDashboardRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteDashboardRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified dashboard. 
*/ + listOperations( + request: ListDashboardOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListDashboardOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListDashboardOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListDashboardOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListDashboardOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListDashboardOperationsResponse + ) => void + ): ClientUnaryCall; +} + +export const DashboardServiceClient = makeGenericClientConstructor( + DashboardServiceService, + "yandex.cloud.monitoring.v3.DashboardService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): DashboardServiceClient; + service: typeof DashboardServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/downsampling.ts b/src/generated/yandex/cloud/monitoring/v3/downsampling.ts new file mode 100644 index 00000000..820badcd --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/downsampling.ts @@ -0,0 +1,316 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** List of available aggregate functions for downsampling. */ +export interface Downsampling { + $type: "yandex.cloud.monitoring.v3.Downsampling"; + /** Maximum number of points to be returned. */ + maxPoints: number | undefined; + /** + * Time interval (grid) for downsampling in milliseconds. + * Points in the specified range are aggregated into one time point. + */ + gridInterval: number | undefined; + /** Disable downsampling. */ + disabled: boolean | undefined; + /** Function that is used for downsampling. */ + gridAggregation: Downsampling_GridAggregation; + /** Parameters for filling gaps in data. */ + gapFilling: Downsampling_GapFilling; +} + +/** List of available aggregate functions for downsampling. */ +export enum Downsampling_GridAggregation { + GRID_AGGREGATION_UNSPECIFIED = 0, + /** GRID_AGGREGATION_MAX - Max value. */ + GRID_AGGREGATION_MAX = 1, + /** GRID_AGGREGATION_MIN - Min value. 
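// Usage sketch for the generated DashboardService client above, using the
// plain @grpc/grpc-js callback API. The endpoint and IAM token are
// placeholders, and GetDashboardRequest is assumed to be exported from this
// module like the other messages.
import { ChannelCredentials, Metadata } from "@grpc/grpc-js";
import { DashboardServiceClient, GetDashboardRequest } from "./dashboard_service";

const dashboardClient = new DashboardServiceClient(
  "monitoring.api.cloud.yandex.net:443", // assumed endpoint
  ChannelCredentials.createSsl()
);

const authMetadata = new Metadata();
authMetadata.set("authorization", "Bearer <IAM_TOKEN>"); // placeholder credentials

dashboardClient.get(
  GetDashboardRequest.fromPartial({ dashboardId: "example-dashboard-id" }),
  authMetadata,
  (err, dashboard) => {
    if (err) throw err;
    console.log(dashboard.id); // assumes Dashboard carries an `id` field
  }
);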
*/ + GRID_AGGREGATION_MIN = 2, + /** GRID_AGGREGATION_SUM - Sum of values. */ + GRID_AGGREGATION_SUM = 3, + /** GRID_AGGREGATION_AVG - Average value. */ + GRID_AGGREGATION_AVG = 4, + /** GRID_AGGREGATION_LAST - Last value. */ + GRID_AGGREGATION_LAST = 5, + /** GRID_AGGREGATION_COUNT - Total count of points. */ + GRID_AGGREGATION_COUNT = 6, + UNRECOGNIZED = -1, +} + +export function downsampling_GridAggregationFromJSON( + object: any +): Downsampling_GridAggregation { + switch (object) { + case 0: + case "GRID_AGGREGATION_UNSPECIFIED": + return Downsampling_GridAggregation.GRID_AGGREGATION_UNSPECIFIED; + case 1: + case "GRID_AGGREGATION_MAX": + return Downsampling_GridAggregation.GRID_AGGREGATION_MAX; + case 2: + case "GRID_AGGREGATION_MIN": + return Downsampling_GridAggregation.GRID_AGGREGATION_MIN; + case 3: + case "GRID_AGGREGATION_SUM": + return Downsampling_GridAggregation.GRID_AGGREGATION_SUM; + case 4: + case "GRID_AGGREGATION_AVG": + return Downsampling_GridAggregation.GRID_AGGREGATION_AVG; + case 5: + case "GRID_AGGREGATION_LAST": + return Downsampling_GridAggregation.GRID_AGGREGATION_LAST; + case 6: + case "GRID_AGGREGATION_COUNT": + return Downsampling_GridAggregation.GRID_AGGREGATION_COUNT; + case -1: + case "UNRECOGNIZED": + default: + return Downsampling_GridAggregation.UNRECOGNIZED; + } +} + +export function downsampling_GridAggregationToJSON( + object: Downsampling_GridAggregation +): string { + switch (object) { + case Downsampling_GridAggregation.GRID_AGGREGATION_UNSPECIFIED: + return "GRID_AGGREGATION_UNSPECIFIED"; + case Downsampling_GridAggregation.GRID_AGGREGATION_MAX: + return "GRID_AGGREGATION_MAX"; + case Downsampling_GridAggregation.GRID_AGGREGATION_MIN: + return "GRID_AGGREGATION_MIN"; + case Downsampling_GridAggregation.GRID_AGGREGATION_SUM: + return "GRID_AGGREGATION_SUM"; + case Downsampling_GridAggregation.GRID_AGGREGATION_AVG: + return "GRID_AGGREGATION_AVG"; + case Downsampling_GridAggregation.GRID_AGGREGATION_LAST: + return "GRID_AGGREGATION_LAST"; + case Downsampling_GridAggregation.GRID_AGGREGATION_COUNT: + return "GRID_AGGREGATION_COUNT"; + default: + return "UNKNOWN"; + } +} + +/** List of available gap filling policy for downsampling. */ +export enum Downsampling_GapFilling { + GAP_FILLING_UNSPECIFIED = 0, + /** GAP_FILLING_NULL - Returns `null` as a metric value and `timestamp` as a time series value. */ + GAP_FILLING_NULL = 1, + /** GAP_FILLING_NONE - Returns no value and no timestamp. */ + GAP_FILLING_NONE = 2, + /** GAP_FILLING_PREVIOUS - Returns the value from the previous time interval. 
*/ + GAP_FILLING_PREVIOUS = 3, + UNRECOGNIZED = -1, +} + +export function downsampling_GapFillingFromJSON( + object: any +): Downsampling_GapFilling { + switch (object) { + case 0: + case "GAP_FILLING_UNSPECIFIED": + return Downsampling_GapFilling.GAP_FILLING_UNSPECIFIED; + case 1: + case "GAP_FILLING_NULL": + return Downsampling_GapFilling.GAP_FILLING_NULL; + case 2: + case "GAP_FILLING_NONE": + return Downsampling_GapFilling.GAP_FILLING_NONE; + case 3: + case "GAP_FILLING_PREVIOUS": + return Downsampling_GapFilling.GAP_FILLING_PREVIOUS; + case -1: + case "UNRECOGNIZED": + default: + return Downsampling_GapFilling.UNRECOGNIZED; + } +} + +export function downsampling_GapFillingToJSON( + object: Downsampling_GapFilling +): string { + switch (object) { + case Downsampling_GapFilling.GAP_FILLING_UNSPECIFIED: + return "GAP_FILLING_UNSPECIFIED"; + case Downsampling_GapFilling.GAP_FILLING_NULL: + return "GAP_FILLING_NULL"; + case Downsampling_GapFilling.GAP_FILLING_NONE: + return "GAP_FILLING_NONE"; + case Downsampling_GapFilling.GAP_FILLING_PREVIOUS: + return "GAP_FILLING_PREVIOUS"; + default: + return "UNKNOWN"; + } +} + +const baseDownsampling: object = { + $type: "yandex.cloud.monitoring.v3.Downsampling", + gridAggregation: 0, + gapFilling: 0, +}; + +export const Downsampling = { + $type: "yandex.cloud.monitoring.v3.Downsampling" as const, + + encode( + message: Downsampling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxPoints !== undefined) { + writer.uint32(8).int64(message.maxPoints); + } + if (message.gridInterval !== undefined) { + writer.uint32(16).int64(message.gridInterval); + } + if (message.disabled !== undefined) { + writer.uint32(24).bool(message.disabled); + } + if (message.gridAggregation !== 0) { + writer.uint32(32).int32(message.gridAggregation); + } + if (message.gapFilling !== 0) { + writer.uint32(40).int32(message.gapFilling); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Downsampling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDownsampling } as Downsampling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxPoints = longToNumber(reader.int64() as Long); + break; + case 2: + message.gridInterval = longToNumber(reader.int64() as Long); + break; + case 3: + message.disabled = reader.bool(); + break; + case 4: + message.gridAggregation = reader.int32() as any; + break; + case 5: + message.gapFilling = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Downsampling { + const message = { ...baseDownsampling } as Downsampling; + message.maxPoints = + object.maxPoints !== undefined && object.maxPoints !== null + ? Number(object.maxPoints) + : undefined; + message.gridInterval = + object.gridInterval !== undefined && object.gridInterval !== null + ? Number(object.gridInterval) + : undefined; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : undefined; + message.gridAggregation = + object.gridAggregation !== undefined && object.gridAggregation !== null + ? downsampling_GridAggregationFromJSON(object.gridAggregation) + : 0; + message.gapFilling = + object.gapFilling !== undefined && object.gapFilling !== null + ? 
downsampling_GapFillingFromJSON(object.gapFilling) + : 0; + return message; + }, + + toJSON(message: Downsampling): unknown { + const obj: any = {}; + message.maxPoints !== undefined && + (obj.maxPoints = Math.round(message.maxPoints)); + message.gridInterval !== undefined && + (obj.gridInterval = Math.round(message.gridInterval)); + message.disabled !== undefined && (obj.disabled = message.disabled); + message.gridAggregation !== undefined && + (obj.gridAggregation = downsampling_GridAggregationToJSON( + message.gridAggregation + )); + message.gapFilling !== undefined && + (obj.gapFilling = downsampling_GapFillingToJSON(message.gapFilling)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Downsampling { + const message = { ...baseDownsampling } as Downsampling; + message.maxPoints = object.maxPoints ?? undefined; + message.gridInterval = object.gridInterval ?? undefined; + message.disabled = object.disabled ?? undefined; + message.gridAggregation = object.gridAggregation ?? 0; + message.gapFilling = object.gapFilling ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Downsampling.$type, Downsampling); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/parametrization.ts b/src/generated/yandex/cloud/monitoring/v3/parametrization.ts new file mode 100644 index 00000000..21d98f9d --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/parametrization.ts @@ -0,0 +1,967 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + UnitFormat, + unitFormatFromJSON, + unitFormatToJSON, +} from "../../../../yandex/cloud/monitoring/v3/unit_format"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** Label values parameter. */ +export interface LabelValuesParameter { + $type: "yandex.cloud.monitoring.v3.LabelValuesParameter"; + /** Required. Folder ID. */ + folderId: string | undefined; + /** Required. Selectors to select metric label values. */ + selectors: string; + /** Required. Label key to list label values. */ + labelKey: string; + /** Specifies the multiselectable values of parameter. */ + multiselectable: boolean; + /** Default values. */ + defaultValues: string[]; +} + +/** Custom parameter. 
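// Usage sketch for the Downsampling message defined above; the interval and
// aggregation values are arbitrary examples.
import {
  Downsampling,
  Downsampling_GridAggregation,
  Downsampling_GapFilling,
} from "./downsampling";

const downsampling = Downsampling.fromPartial({
  gridInterval: 60_000, // one-minute grid, in milliseconds
  gridAggregation: Downsampling_GridAggregation.GRID_AGGREGATION_AVG,
  gapFilling: Downsampling_GapFilling.GAP_FILLING_NULL,
});

// toJSON renders enum fields as their string names,
// e.g. gridAggregation -> "GRID_AGGREGATION_AVG".
console.log(Downsampling.toJSON(downsampling));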
*/ +export interface CustomParameter { + $type: "yandex.cloud.monitoring.v3.CustomParameter"; + /** Required. List of parameter values. */ + values: string[]; + /** Specifies the multiselectable values of parameter. */ + multiselectable: boolean; + /** Default values. */ + defaultValues: string[]; +} + +/** Text parameter. */ +export interface TextParameter { + $type: "yandex.cloud.monitoring.v3.TextParameter"; + /** Default value. */ + defaultValue: string; +} + +/** Double parameter. */ +export interface DoubleParameter { + $type: "yandex.cloud.monitoring.v3.DoubleParameter"; + /** Default value. */ + defaultValue: number; + /** Parameter unit. */ + unitFormat: UnitFormat; +} + +/** Integer parameter. */ +export interface IntegerParameter { + $type: "yandex.cloud.monitoring.v3.IntegerParameter"; + /** Default value. */ + defaultValue: number; + /** Parameter unit. */ + unitFormat: UnitFormat; +} + +/** Text multiple values parameter. */ +export interface TextValuesParameter { + $type: "yandex.cloud.monitoring.v3.TextValuesParameter"; + /** Default value. */ + defaultValues: string[]; +} + +/** Parameter. */ +export interface Parameter { + $type: "yandex.cloud.monitoring.v3.Parameter"; + /** Parameter identifier. */ + name: string; + /** UI-visible title of the parameter. */ + title: string; + /** Label values parameter. */ + labelValues?: LabelValuesParameter | undefined; + /** Custom parameter. */ + custom?: CustomParameter | undefined; + /** Text parameter. */ + text?: TextParameter | undefined; + /** Integer parameter. */ + integerParameter?: IntegerParameter | undefined; + /** Double parameter. */ + doubleParameter?: DoubleParameter | undefined; + /** Integer parameter. */ + textValues?: TextValuesParameter | undefined; + /** UI-visibility. */ + hidden: boolean; + /** Parameter description. */ + description: string; +} + +/** Parametrization. */ +export interface Parametrization { + $type: "yandex.cloud.monitoring.v3.Parametrization"; + /** Parameters. */ + parameters: Parameter[]; + /** Predefined selectors. */ + selectors: string; +} + +const baseLabelValuesParameter: object = { + $type: "yandex.cloud.monitoring.v3.LabelValuesParameter", + selectors: "", + labelKey: "", + multiselectable: false, + defaultValues: "", +}; + +export const LabelValuesParameter = { + $type: "yandex.cloud.monitoring.v3.LabelValuesParameter" as const, + + encode( + message: LabelValuesParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== undefined) { + writer.uint32(18).string(message.folderId); + } + if (message.selectors !== "") { + writer.uint32(154).string(message.selectors); + } + if (message.labelKey !== "") { + writer.uint32(162).string(message.labelKey); + } + if (message.multiselectable === true) { + writer.uint32(168).bool(message.multiselectable); + } + for (const v of message.defaultValues) { + writer.uint32(178).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LabelValuesParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLabelValuesParameter } as LabelValuesParameter; + message.defaultValues = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.folderId = reader.string(); + break; + case 19: + message.selectors = reader.string(); + break; + case 20: + message.labelKey = reader.string(); + break; + case 21: + message.multiselectable = reader.bool(); + break; + case 22: + message.defaultValues.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LabelValuesParameter { + const message = { ...baseLabelValuesParameter } as LabelValuesParameter; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : undefined; + message.selectors = + object.selectors !== undefined && object.selectors !== null + ? String(object.selectors) + : ""; + message.labelKey = + object.labelKey !== undefined && object.labelKey !== null + ? String(object.labelKey) + : ""; + message.multiselectable = + object.multiselectable !== undefined && object.multiselectable !== null + ? Boolean(object.multiselectable) + : false; + message.defaultValues = (object.defaultValues ?? []).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: LabelValuesParameter): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.selectors !== undefined && (obj.selectors = message.selectors); + message.labelKey !== undefined && (obj.labelKey = message.labelKey); + message.multiselectable !== undefined && + (obj.multiselectable = message.multiselectable); + if (message.defaultValues) { + obj.defaultValues = message.defaultValues.map((e) => e); + } else { + obj.defaultValues = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LabelValuesParameter { + const message = { ...baseLabelValuesParameter } as LabelValuesParameter; + message.folderId = object.folderId ?? undefined; + message.selectors = object.selectors ?? ""; + message.labelKey = object.labelKey ?? ""; + message.multiselectable = object.multiselectable ?? false; + message.defaultValues = object.defaultValues?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(LabelValuesParameter.$type, LabelValuesParameter); + +const baseCustomParameter: object = { + $type: "yandex.cloud.monitoring.v3.CustomParameter", + values: "", + multiselectable: false, + defaultValues: "", +}; + +export const CustomParameter = { + $type: "yandex.cloud.monitoring.v3.CustomParameter" as const, + + encode( + message: CustomParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.values) { + writer.uint32(10).string(v!); + } + if (message.multiselectable === true) { + writer.uint32(16).bool(message.multiselectable); + } + for (const v of message.defaultValues) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CustomParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
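// Usage sketch for the LabelValuesParameter converters above: hydrating a
// message from plain JSON. The folder ID and selectors are placeholders.
import { LabelValuesParameter } from "./parametrization";

const hostParameter = LabelValuesParameter.fromJSON({
  folderId: "example-folder-id",
  selectors: '{service="example-service"}',
  labelKey: "host",
  multiselectable: true,
  defaultValues: ["*"],
});

console.log(hostParameter.$type); // "yandex.cloud.monitoring.v3.LabelValuesParameter"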
reader.len : reader.pos + length; + const message = { ...baseCustomParameter } as CustomParameter; + message.values = []; + message.defaultValues = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values.push(reader.string()); + break; + case 2: + message.multiselectable = reader.bool(); + break; + case 3: + message.defaultValues.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CustomParameter { + const message = { ...baseCustomParameter } as CustomParameter; + message.values = (object.values ?? []).map((e: any) => String(e)); + message.multiselectable = + object.multiselectable !== undefined && object.multiselectable !== null + ? Boolean(object.multiselectable) + : false; + message.defaultValues = (object.defaultValues ?? []).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: CustomParameter): unknown { + const obj: any = {}; + if (message.values) { + obj.values = message.values.map((e) => e); + } else { + obj.values = []; + } + message.multiselectable !== undefined && + (obj.multiselectable = message.multiselectable); + if (message.defaultValues) { + obj.defaultValues = message.defaultValues.map((e) => e); + } else { + obj.defaultValues = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): CustomParameter { + const message = { ...baseCustomParameter } as CustomParameter; + message.values = object.values?.map((e) => e) || []; + message.multiselectable = object.multiselectable ?? false; + message.defaultValues = object.defaultValues?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(CustomParameter.$type, CustomParameter); + +const baseTextParameter: object = { + $type: "yandex.cloud.monitoring.v3.TextParameter", + defaultValue: "", +}; + +export const TextParameter = { + $type: "yandex.cloud.monitoring.v3.TextParameter" as const, + + encode( + message: TextParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.defaultValue !== "") { + writer.uint32(10).string(message.defaultValue); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TextParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTextParameter } as TextParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TextParameter { + const message = { ...baseTextParameter } as TextParameter; + message.defaultValue = + object.defaultValue !== undefined && object.defaultValue !== null + ? String(object.defaultValue) + : ""; + return message; + }, + + toJSON(message: TextParameter): unknown { + const obj: any = {}; + message.defaultValue !== undefined && + (obj.defaultValue = message.defaultValue); + return obj; + }, + + fromPartial, I>>( + object: I + ): TextParameter { + const message = { ...baseTextParameter } as TextParameter; + message.defaultValue = object.defaultValue ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(TextParameter.$type, TextParameter); + +const baseDoubleParameter: object = { + $type: "yandex.cloud.monitoring.v3.DoubleParameter", + defaultValue: 0, + unitFormat: 0, +}; + +export const DoubleParameter = { + $type: "yandex.cloud.monitoring.v3.DoubleParameter" as const, + + encode( + message: DoubleParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.defaultValue !== 0) { + writer.uint32(9).double(message.defaultValue); + } + if (message.unitFormat !== 0) { + writer.uint32(16).int32(message.unitFormat); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DoubleParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDoubleParameter } as DoubleParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.double(); + break; + case 2: + message.unitFormat = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DoubleParameter { + const message = { ...baseDoubleParameter } as DoubleParameter; + message.defaultValue = + object.defaultValue !== undefined && object.defaultValue !== null + ? Number(object.defaultValue) + : 0; + message.unitFormat = + object.unitFormat !== undefined && object.unitFormat !== null + ? unitFormatFromJSON(object.unitFormat) + : 0; + return message; + }, + + toJSON(message: DoubleParameter): unknown { + const obj: any = {}; + message.defaultValue !== undefined && + (obj.defaultValue = message.defaultValue); + message.unitFormat !== undefined && + (obj.unitFormat = unitFormatToJSON(message.unitFormat)); + return obj; + }, + + fromPartial, I>>( + object: I + ): DoubleParameter { + const message = { ...baseDoubleParameter } as DoubleParameter; + message.defaultValue = object.defaultValue ?? 0; + message.unitFormat = object.unitFormat ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(DoubleParameter.$type, DoubleParameter); + +const baseIntegerParameter: object = { + $type: "yandex.cloud.monitoring.v3.IntegerParameter", + defaultValue: 0, + unitFormat: 0, +}; + +export const IntegerParameter = { + $type: "yandex.cloud.monitoring.v3.IntegerParameter" as const, + + encode( + message: IntegerParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.defaultValue !== 0) { + writer.uint32(8).int64(message.defaultValue); + } + if (message.unitFormat !== 0) { + writer.uint32(16).int32(message.unitFormat); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): IntegerParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseIntegerParameter } as IntegerParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = longToNumber(reader.int64() as Long); + break; + case 2: + message.unitFormat = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): IntegerParameter { + const message = { ...baseIntegerParameter } as IntegerParameter; + message.defaultValue = + object.defaultValue !== undefined && object.defaultValue !== null + ? 
Number(object.defaultValue) + : 0; + message.unitFormat = + object.unitFormat !== undefined && object.unitFormat !== null + ? unitFormatFromJSON(object.unitFormat) + : 0; + return message; + }, + + toJSON(message: IntegerParameter): unknown { + const obj: any = {}; + message.defaultValue !== undefined && + (obj.defaultValue = Math.round(message.defaultValue)); + message.unitFormat !== undefined && + (obj.unitFormat = unitFormatToJSON(message.unitFormat)); + return obj; + }, + + fromPartial, I>>( + object: I + ): IntegerParameter { + const message = { ...baseIntegerParameter } as IntegerParameter; + message.defaultValue = object.defaultValue ?? 0; + message.unitFormat = object.unitFormat ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(IntegerParameter.$type, IntegerParameter); + +const baseTextValuesParameter: object = { + $type: "yandex.cloud.monitoring.v3.TextValuesParameter", + defaultValues: "", +}; + +export const TextValuesParameter = { + $type: "yandex.cloud.monitoring.v3.TextValuesParameter" as const, + + encode( + message: TextValuesParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.defaultValues) { + writer.uint32(10).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TextValuesParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTextValuesParameter } as TextValuesParameter; + message.defaultValues = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValues.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TextValuesParameter { + const message = { ...baseTextValuesParameter } as TextValuesParameter; + message.defaultValues = (object.defaultValues ?? 
[]).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: TextValuesParameter): unknown { + const obj: any = {}; + if (message.defaultValues) { + obj.defaultValues = message.defaultValues.map((e) => e); + } else { + obj.defaultValues = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): TextValuesParameter { + const message = { ...baseTextValuesParameter } as TextValuesParameter; + message.defaultValues = object.defaultValues?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(TextValuesParameter.$type, TextValuesParameter); + +const baseParameter: object = { + $type: "yandex.cloud.monitoring.v3.Parameter", + name: "", + title: "", + hidden: false, + description: "", +}; + +export const Parameter = { + $type: "yandex.cloud.monitoring.v3.Parameter" as const, + + encode( + message: Parameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.title !== "") { + writer.uint32(18).string(message.title); + } + if (message.labelValues !== undefined) { + LabelValuesParameter.encode( + message.labelValues, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.custom !== undefined) { + CustomParameter.encode(message.custom, writer.uint32(34).fork()).ldelim(); + } + if (message.text !== undefined) { + TextParameter.encode(message.text, writer.uint32(42).fork()).ldelim(); + } + if (message.integerParameter !== undefined) { + IntegerParameter.encode( + message.integerParameter, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.doubleParameter !== undefined) { + DoubleParameter.encode( + message.doubleParameter, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.textValues !== undefined) { + TextValuesParameter.encode( + message.textValues, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.hidden === true) { + writer.uint32(48).bool(message.hidden); + } + if (message.description !== "") { + writer.uint32(82).string(message.description); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Parameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseParameter } as Parameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.title = reader.string(); + break; + case 3: + message.labelValues = LabelValuesParameter.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.custom = CustomParameter.decode(reader, reader.uint32()); + break; + case 5: + message.text = TextParameter.decode(reader, reader.uint32()); + break; + case 7: + message.integerParameter = IntegerParameter.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.doubleParameter = DoubleParameter.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.textValues = TextValuesParameter.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.hidden = reader.bool(); + break; + case 10: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Parameter { + const message = { ...baseParameter } as Parameter; + message.name = + object.name !== undefined && object.name !== null + ? 
String(object.name) + : ""; + message.title = + object.title !== undefined && object.title !== null + ? String(object.title) + : ""; + message.labelValues = + object.labelValues !== undefined && object.labelValues !== null + ? LabelValuesParameter.fromJSON(object.labelValues) + : undefined; + message.custom = + object.custom !== undefined && object.custom !== null + ? CustomParameter.fromJSON(object.custom) + : undefined; + message.text = + object.text !== undefined && object.text !== null + ? TextParameter.fromJSON(object.text) + : undefined; + message.integerParameter = + object.integerParameter !== undefined && object.integerParameter !== null + ? IntegerParameter.fromJSON(object.integerParameter) + : undefined; + message.doubleParameter = + object.doubleParameter !== undefined && object.doubleParameter !== null + ? DoubleParameter.fromJSON(object.doubleParameter) + : undefined; + message.textValues = + object.textValues !== undefined && object.textValues !== null + ? TextValuesParameter.fromJSON(object.textValues) + : undefined; + message.hidden = + object.hidden !== undefined && object.hidden !== null + ? Boolean(object.hidden) + : false; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + return message; + }, + + toJSON(message: Parameter): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.title !== undefined && (obj.title = message.title); + message.labelValues !== undefined && + (obj.labelValues = message.labelValues + ? LabelValuesParameter.toJSON(message.labelValues) + : undefined); + message.custom !== undefined && + (obj.custom = message.custom + ? CustomParameter.toJSON(message.custom) + : undefined); + message.text !== undefined && + (obj.text = message.text + ? TextParameter.toJSON(message.text) + : undefined); + message.integerParameter !== undefined && + (obj.integerParameter = message.integerParameter + ? IntegerParameter.toJSON(message.integerParameter) + : undefined); + message.doubleParameter !== undefined && + (obj.doubleParameter = message.doubleParameter + ? DoubleParameter.toJSON(message.doubleParameter) + : undefined); + message.textValues !== undefined && + (obj.textValues = message.textValues + ? TextValuesParameter.toJSON(message.textValues) + : undefined); + message.hidden !== undefined && (obj.hidden = message.hidden); + message.description !== undefined && + (obj.description = message.description); + return obj; + }, + + fromPartial, I>>( + object: I + ): Parameter { + const message = { ...baseParameter } as Parameter; + message.name = object.name ?? ""; + message.title = object.title ?? ""; + message.labelValues = + object.labelValues !== undefined && object.labelValues !== null + ? LabelValuesParameter.fromPartial(object.labelValues) + : undefined; + message.custom = + object.custom !== undefined && object.custom !== null + ? CustomParameter.fromPartial(object.custom) + : undefined; + message.text = + object.text !== undefined && object.text !== null + ? TextParameter.fromPartial(object.text) + : undefined; + message.integerParameter = + object.integerParameter !== undefined && object.integerParameter !== null + ? IntegerParameter.fromPartial(object.integerParameter) + : undefined; + message.doubleParameter = + object.doubleParameter !== undefined && object.doubleParameter !== null + ? 
DoubleParameter.fromPartial(object.doubleParameter) + : undefined; + message.textValues = + object.textValues !== undefined && object.textValues !== null + ? TextValuesParameter.fromPartial(object.textValues) + : undefined; + message.hidden = object.hidden ?? false; + message.description = object.description ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Parameter.$type, Parameter); + +const baseParametrization: object = { + $type: "yandex.cloud.monitoring.v3.Parametrization", + selectors: "", +}; + +export const Parametrization = { + $type: "yandex.cloud.monitoring.v3.Parametrization" as const, + + encode( + message: Parametrization, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.parameters) { + Parameter.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.selectors !== "") { + writer.uint32(18).string(message.selectors); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Parametrization { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseParametrization } as Parametrization; + message.parameters = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.parameters.push(Parameter.decode(reader, reader.uint32())); + break; + case 2: + message.selectors = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Parametrization { + const message = { ...baseParametrization } as Parametrization; + message.parameters = (object.parameters ?? []).map((e: any) => + Parameter.fromJSON(e) + ); + message.selectors = + object.selectors !== undefined && object.selectors !== null + ? String(object.selectors) + : ""; + return message; + }, + + toJSON(message: Parametrization): unknown { + const obj: any = {}; + if (message.parameters) { + obj.parameters = message.parameters.map((e) => + e ? Parameter.toJSON(e) : undefined + ); + } else { + obj.parameters = []; + } + message.selectors !== undefined && (obj.selectors = message.selectors); + return obj; + }, + + fromPartial, I>>( + object: I + ): Parametrization { + const message = { ...baseParametrization } as Parametrization; + message.parameters = + object.parameters?.map((e) => Parameter.fromPartial(e)) || []; + message.selectors = object.selectors ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Parametrization.$type, Parametrization); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
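// Usage sketch for the Parameter and Parametrization messages above: one
// custom drop-down parameter plus a predefined selector string (both values
// are placeholders).
import { CustomParameter, Parameter, Parametrization } from "./parametrization";

const environmentParameter = Parameter.fromPartial({
  name: "environment",
  title: "Environment",
  custom: CustomParameter.fromPartial({
    values: ["prod", "preprod", "dev"],
    defaultValues: ["prod"],
  }),
});

const parametrization = Parametrization.fromPartial({
  parameters: [environmentParameter],
  selectors: '{service="example-service"}', // placeholder selector
});

// The result encodes like any other message in this package.
console.log(Parametrization.encode(parametrization).finish().byteLength);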
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/text_widget.ts b/src/generated/yandex/cloud/monitoring/v3/text_widget.ts new file mode 100644 index 00000000..7cc910f2 --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/text_widget.ts @@ -0,0 +1,107 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** Text widget. */ +export interface TextWidget { + $type: "yandex.cloud.monitoring.v3.TextWidget"; + /** Text. */ + text: string; +} + +const baseTextWidget: object = { + $type: "yandex.cloud.monitoring.v3.TextWidget", + text: "", +}; + +export const TextWidget = { + $type: "yandex.cloud.monitoring.v3.TextWidget" as const, + + encode( + message: TextWidget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.text !== "") { + writer.uint32(10).string(message.text); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TextWidget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTextWidget } as TextWidget; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.text = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TextWidget { + const message = { ...baseTextWidget } as TextWidget; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + return message; + }, + + toJSON(message: TextWidget): unknown { + const obj: any = {}; + message.text !== undefined && (obj.text = message.text); + return obj; + }, + + fromPartial, I>>( + object: I + ): TextWidget { + const message = { ...baseTextWidget } as TextWidget; + message.text = object.text ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(TextWidget.$type, TextWidget); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/title_widget.ts b/src/generated/yandex/cloud/monitoring/v3/title_widget.ts new file mode 100644 index 00000000..b386c337 --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/title_widget.ts @@ -0,0 +1,182 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** Title widget. 
*/ +export interface TitleWidget { + $type: "yandex.cloud.monitoring.v3.TitleWidget"; + /** Title text. */ + text: string; + /** Title size. */ + size: TitleWidget_TitleSize; +} + +/** Title size. */ +export enum TitleWidget_TitleSize { + TITLE_SIZE_UNSPECIFIED = 0, + /** TITLE_SIZE_XS - Extra small size. */ + TITLE_SIZE_XS = 1, + /** TITLE_SIZE_S - Small size. */ + TITLE_SIZE_S = 2, + /** TITLE_SIZE_M - Middle size. */ + TITLE_SIZE_M = 3, + /** TITLE_SIZE_L - Large size. */ + TITLE_SIZE_L = 4, + UNRECOGNIZED = -1, +} + +export function titleWidget_TitleSizeFromJSON( + object: any +): TitleWidget_TitleSize { + switch (object) { + case 0: + case "TITLE_SIZE_UNSPECIFIED": + return TitleWidget_TitleSize.TITLE_SIZE_UNSPECIFIED; + case 1: + case "TITLE_SIZE_XS": + return TitleWidget_TitleSize.TITLE_SIZE_XS; + case 2: + case "TITLE_SIZE_S": + return TitleWidget_TitleSize.TITLE_SIZE_S; + case 3: + case "TITLE_SIZE_M": + return TitleWidget_TitleSize.TITLE_SIZE_M; + case 4: + case "TITLE_SIZE_L": + return TitleWidget_TitleSize.TITLE_SIZE_L; + case -1: + case "UNRECOGNIZED": + default: + return TitleWidget_TitleSize.UNRECOGNIZED; + } +} + +export function titleWidget_TitleSizeToJSON( + object: TitleWidget_TitleSize +): string { + switch (object) { + case TitleWidget_TitleSize.TITLE_SIZE_UNSPECIFIED: + return "TITLE_SIZE_UNSPECIFIED"; + case TitleWidget_TitleSize.TITLE_SIZE_XS: + return "TITLE_SIZE_XS"; + case TitleWidget_TitleSize.TITLE_SIZE_S: + return "TITLE_SIZE_S"; + case TitleWidget_TitleSize.TITLE_SIZE_M: + return "TITLE_SIZE_M"; + case TitleWidget_TitleSize.TITLE_SIZE_L: + return "TITLE_SIZE_L"; + default: + return "UNKNOWN"; + } +} + +const baseTitleWidget: object = { + $type: "yandex.cloud.monitoring.v3.TitleWidget", + text: "", + size: 0, +}; + +export const TitleWidget = { + $type: "yandex.cloud.monitoring.v3.TitleWidget" as const, + + encode( + message: TitleWidget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.text !== "") { + writer.uint32(10).string(message.text); + } + if (message.size !== 0) { + writer.uint32(16).int32(message.size); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): TitleWidget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseTitleWidget } as TitleWidget; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.text = reader.string(); + break; + case 2: + message.size = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TitleWidget { + const message = { ...baseTitleWidget } as TitleWidget; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.size = + object.size !== undefined && object.size !== null + ? titleWidget_TitleSizeFromJSON(object.size) + : 0; + return message; + }, + + toJSON(message: TitleWidget): unknown { + const obj: any = {}; + message.text !== undefined && (obj.text = message.text); + message.size !== undefined && + (obj.size = titleWidget_TitleSizeToJSON(message.size)); + return obj; + }, + + fromPartial, I>>( + object: I + ): TitleWidget { + const message = { ...baseTitleWidget } as TitleWidget; + message.text = object.text ?? ""; + message.size = object.size ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(TitleWidget.$type, TitleWidget); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/unit_format.ts b/src/generated/yandex/cloud/monitoring/v3/unit_format.ts new file mode 100644 index 00000000..0cc80d0c --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/unit_format.ts @@ -0,0 +1,666 @@ +/* eslint-disable */ +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +export enum UnitFormat { + UNIT_FORMAT_UNSPECIFIED = 0, + /** UNIT_NONE - None (show tick values as-is). */ + UNIT_NONE = 1, + /** UNIT_COUNT - Count. */ + UNIT_COUNT = 2, + /** UNIT_PERCENT - Percent (0-100). */ + UNIT_PERCENT = 3, + /** UNIT_PERCENT_UNIT - Percent (0-1). */ + UNIT_PERCENT_UNIT = 4, + /** UNIT_NANOSECONDS - Nanoseconds (ns). */ + UNIT_NANOSECONDS = 5, + /** UNIT_MICROSECONDS - Microseconds (µs). */ + UNIT_MICROSECONDS = 6, + /** UNIT_MILLISECONDS - Milliseconds (ms). */ + UNIT_MILLISECONDS = 7, + /** UNIT_SECONDS - Seconds (s). */ + UNIT_SECONDS = 8, + /** UNIT_MINUTES - Minutes (m). */ + UNIT_MINUTES = 9, + /** UNIT_HOURS - Hours (h). */ + UNIT_HOURS = 10, + /** UNIT_DAYS - Days (d). */ + UNIT_DAYS = 11, + /** UNIT_BITS_SI - Bits (SI). */ + UNIT_BITS_SI = 12, + /** UNIT_BYTES_SI - Bytes (SI). */ + UNIT_BYTES_SI = 13, + /** UNIT_KILOBYTES - Kilobytes (KB). */ + UNIT_KILOBYTES = 14, + /** UNIT_MEGABYTES - Megabytes (MB). */ + UNIT_MEGABYTES = 15, + /** UNIT_GIGABYTES - Gigabytes (GB). */ + UNIT_GIGABYTES = 16, + /** UNIT_TERABYTES - Terabytes (TB) */ + UNIT_TERABYTES = 17, + /** UNIT_PETABYTES - Petabytes (PB). */ + UNIT_PETABYTES = 18, + /** UNIT_EXABYTES - Exabytes (EB). */ + UNIT_EXABYTES = 19, + /** UNIT_BITS_IEC - Bits (IEC). */ + UNIT_BITS_IEC = 20, + /** UNIT_BYTES_IEC - Bytes (IEC). */ + UNIT_BYTES_IEC = 21, + /** UNIT_KIBIBYTES - Kibibytes (KiB). */ + UNIT_KIBIBYTES = 22, + /** UNIT_MEBIBYTES - Mebibytes (MiB). */ + UNIT_MEBIBYTES = 23, + /** UNIT_GIBIBYTES - Gigibytes (GiB). */ + UNIT_GIBIBYTES = 24, + /** UNIT_TEBIBYTES - Tebibytes (TiB). */ + UNIT_TEBIBYTES = 25, + /** UNIT_PEBIBYTES - Pebibytes (PiB). */ + UNIT_PEBIBYTES = 26, + /** UNIT_EXBIBYTES - Exbibytes (EiB). */ + UNIT_EXBIBYTES = 27, + /** UNIT_REQUESTS_PER_SECOND - Requests per second (reqps). */ + UNIT_REQUESTS_PER_SECOND = 28, + /** UNIT_OPERATIONS_PER_SECOND - Operations per second (ops). */ + UNIT_OPERATIONS_PER_SECOND = 29, + /** UNIT_WRITES_PER_SECOND - Writes per second (wps). */ + UNIT_WRITES_PER_SECOND = 30, + /** UNIT_READS_PER_SECOND - Reads per second (rps). */ + UNIT_READS_PER_SECOND = 31, + /** UNIT_PACKETS_PER_SECOND - Packets per second (pps). */ + UNIT_PACKETS_PER_SECOND = 32, + /** UNIT_IO_OPERATIONS_PER_SECOND - IO operations per second (iops). */ + UNIT_IO_OPERATIONS_PER_SECOND = 33, + /** UNIT_COUNTS_PER_SECOND - Counts per second (counts/sec). 
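// Usage sketch for the TextWidget and TitleWidget messages above; the widget
// texts are placeholders.
import { TextWidget } from "./text_widget";
import { TitleWidget, TitleWidget_TitleSize } from "./title_widget";

const noteWidget = TextWidget.fromPartial({ text: "Example note" });
const titleWidget = TitleWidget.fromPartial({
  text: "Example dashboard",
  size: TitleWidget_TitleSize.TITLE_SIZE_M,
});

// Enum fields serialize to their string names, e.g. size -> "TITLE_SIZE_M".
console.log(TextWidget.toJSON(noteWidget), TitleWidget.toJSON(titleWidget));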
*/ + UNIT_COUNTS_PER_SECOND = 34, + /** UNIT_BITS_SI_PER_SECOND - Bits (SI) per second (bits/sec). */ + UNIT_BITS_SI_PER_SECOND = 35, + /** UNIT_BYTES_SI_PER_SECOND - Bytes (SI) per second (bytes/sec). */ + UNIT_BYTES_SI_PER_SECOND = 36, + /** UNIT_KILOBITS_PER_SECOND - Kilobits per second (KBits/sec). */ + UNIT_KILOBITS_PER_SECOND = 37, + /** UNIT_KILOBYTES_PER_SECOND - Kilobytes per second (KB/sec). */ + UNIT_KILOBYTES_PER_SECOND = 38, + /** UNIT_MEGABITS_PER_SECOND - Megabits per second (MBits/sec). */ + UNIT_MEGABITS_PER_SECOND = 39, + /** UNIT_MEGABYTES_PER_SECOND - Megabytes per second (MB/sec). */ + UNIT_MEGABYTES_PER_SECOND = 40, + /** UNIT_GIGABITS_PER_SECOND - Gigabits per second (GBits/sec). */ + UNIT_GIGABITS_PER_SECOND = 41, + /** UNIT_GIGABYTES_PER_SECOND - Gigabytes per second (GB/sec). */ + UNIT_GIGABYTES_PER_SECOND = 42, + /** UNIT_TERABITS_PER_SECOND - Terabits per second (TBits/sec). */ + UNIT_TERABITS_PER_SECOND = 43, + /** UNIT_TERABYTES_PER_SECOND - Terabytes per second (TB/sec). */ + UNIT_TERABYTES_PER_SECOND = 44, + /** UNIT_PETABITS_PER_SECOND - Petabits per second (Pbits/sec). */ + UNIT_PETABITS_PER_SECOND = 45, + /** UNIT_PETABYTES_PER_SECOND - Petabytes per second (PB/sec). */ + UNIT_PETABYTES_PER_SECOND = 46, + /** UNIT_BITS_IEC_PER_SECOND - Bits (IEC) per second (bits/sec). */ + UNIT_BITS_IEC_PER_SECOND = 47, + /** UNIT_BYTES_IEC_PER_SECOND - Bytes (IEC) per second (bytes/sec). */ + UNIT_BYTES_IEC_PER_SECOND = 48, + /** UNIT_KIBIBITS_PER_SECOND - Kibibits per second (KiBits/sec). */ + UNIT_KIBIBITS_PER_SECOND = 49, + /** UNIT_KIBIBYTES_PER_SECOND - Kibibytes per second (KiB/sec). */ + UNIT_KIBIBYTES_PER_SECOND = 50, + /** UNIT_MEBIBITS_PER_SECOND - Mebibits per second (MiBits/sec). */ + UNIT_MEBIBITS_PER_SECOND = 51, + /** UNIT_MEBIBYTES_PER_SECOND - Mebibytes per second (MiB/sec). */ + UNIT_MEBIBYTES_PER_SECOND = 52, + /** UNIT_GIBIBITS_PER_SECOND - Gibibits per second (GiBits/sec). */ + UNIT_GIBIBITS_PER_SECOND = 53, + /** UNIT_GIBIBYTES_PER_SECOND - Gibibytes per second (GiB/sec). */ + UNIT_GIBIBYTES_PER_SECOND = 54, + /** UNIT_TEBIBITS_PER_SECOND - Tebibits per second (TiBits/sec). */ + UNIT_TEBIBITS_PER_SECOND = 55, + /** UNIT_TEBIBYTES_PER_SECOND - Tebibytes per second (TiB/sec). */ + UNIT_TEBIBYTES_PER_SECOND = 56, + /** UNIT_PEBIBITS_PER_SECOND - Pebibits per second (PiBits/sec). */ + UNIT_PEBIBITS_PER_SECOND = 57, + /** UNIT_PEBIBYTES_PER_SECOND - Pebibytes per second (PiB/sec). */ + UNIT_PEBIBYTES_PER_SECOND = 58, + /** UNIT_DATETIME_UTC - Datetime (UTC). */ + UNIT_DATETIME_UTC = 59, + /** UNIT_DATETIME_LOCAL - Datetime (local). */ + UNIT_DATETIME_LOCAL = 60, + /** UNIT_HERTZ - Hertz (Hz). */ + UNIT_HERTZ = 61, + /** UNIT_KILOHERTZ - Kilohertz (KHz). */ + UNIT_KILOHERTZ = 62, + /** UNIT_MEGAHERTZ - Megahertz (MHz). */ + UNIT_MEGAHERTZ = 63, + /** UNIT_GIGAHERTZ - Gigahertz (GHz). */ + UNIT_GIGAHERTZ = 64, + /** UNIT_DOLLAR - Dollar. */ + UNIT_DOLLAR = 65, + /** UNIT_EURO - Euro. */ + UNIT_EURO = 66, + /** UNIT_ROUBLE - Rouble. */ + UNIT_ROUBLE = 67, + /** UNIT_CELSIUS - Celsius (°C). */ + UNIT_CELSIUS = 68, + /** UNIT_FAHRENHEIT - Fahrenheit (°F). */ + UNIT_FAHRENHEIT = 69, + /** UNIT_KELVIN - Kelvin (K). */ + UNIT_KELVIN = 70, + /** UNIT_FLOP_PER_SECOND - Flop per second (FLOP/sec). */ + UNIT_FLOP_PER_SECOND = 71, + /** UNIT_KILOFLOP_PER_SECOND - Kiloflop per second (KFLOP/sec). */ + UNIT_KILOFLOP_PER_SECOND = 72, + /** UNIT_MEGAFLOP_PER_SECOND - Megaflop per second (MFLOP/sec). 
*/ + UNIT_MEGAFLOP_PER_SECOND = 73, + /** UNIT_GIGAFLOP_PER_SECOND - Gigaflop per second (GFLOP/sec). */ + UNIT_GIGAFLOP_PER_SECOND = 74, + /** UNIT_PETAFLOP_PER_SECOND - Petaflop per second (PFLOP/sec). */ + UNIT_PETAFLOP_PER_SECOND = 75, + /** UNIT_EXAFLOP_PER_SECOND - Exaflop per second (EFLOP/sec). */ + UNIT_EXAFLOP_PER_SECOND = 76, + /** UNIT_METERS_PER_SECOND - Meters per second (m/sec). */ + UNIT_METERS_PER_SECOND = 77, + /** UNIT_KILOMETERS_PER_HOUR - Kilometers per hour (km/h). */ + UNIT_KILOMETERS_PER_HOUR = 78, + /** UNIT_MILES_PER_HOUR - Miles per hour (mi/h). */ + UNIT_MILES_PER_HOUR = 79, + /** UNIT_MILLIMETER - Millimeter. */ + UNIT_MILLIMETER = 80, + /** UNIT_CENTIMETER - Centimeter. */ + UNIT_CENTIMETER = 81, + /** UNIT_METER - Meter. */ + UNIT_METER = 82, + /** UNIT_KILOMETER - Kilometer. */ + UNIT_KILOMETER = 83, + /** UNIT_MILE - Mile. */ + UNIT_MILE = 84, + /** UNIT_PPM - Parts per million (ppm). */ + UNIT_PPM = 85, + /** UNIT_EVENTS_PER_SECOND - Events per second */ + UNIT_EVENTS_PER_SECOND = 86, + /** UNIT_PACKETS - Packets */ + UNIT_PACKETS = 87, + /** UNIT_DBM - dBm (dbm) */ + UNIT_DBM = 88, + /** UNIT_VIRTUAL_CPU - Virtual CPU cores based on CPU time (vcpu) */ + UNIT_VIRTUAL_CPU = 89, + /** UNIT_MESSAGES_PER_SECOND - Messages per second (mps) */ + UNIT_MESSAGES_PER_SECOND = 90, + UNRECOGNIZED = -1, +} + +export function unitFormatFromJSON(object: any): UnitFormat { + switch (object) { + case 0: + case "UNIT_FORMAT_UNSPECIFIED": + return UnitFormat.UNIT_FORMAT_UNSPECIFIED; + case 1: + case "UNIT_NONE": + return UnitFormat.UNIT_NONE; + case 2: + case "UNIT_COUNT": + return UnitFormat.UNIT_COUNT; + case 3: + case "UNIT_PERCENT": + return UnitFormat.UNIT_PERCENT; + case 4: + case "UNIT_PERCENT_UNIT": + return UnitFormat.UNIT_PERCENT_UNIT; + case 5: + case "UNIT_NANOSECONDS": + return UnitFormat.UNIT_NANOSECONDS; + case 6: + case "UNIT_MICROSECONDS": + return UnitFormat.UNIT_MICROSECONDS; + case 7: + case "UNIT_MILLISECONDS": + return UnitFormat.UNIT_MILLISECONDS; + case 8: + case "UNIT_SECONDS": + return UnitFormat.UNIT_SECONDS; + case 9: + case "UNIT_MINUTES": + return UnitFormat.UNIT_MINUTES; + case 10: + case "UNIT_HOURS": + return UnitFormat.UNIT_HOURS; + case 11: + case "UNIT_DAYS": + return UnitFormat.UNIT_DAYS; + case 12: + case "UNIT_BITS_SI": + return UnitFormat.UNIT_BITS_SI; + case 13: + case "UNIT_BYTES_SI": + return UnitFormat.UNIT_BYTES_SI; + case 14: + case "UNIT_KILOBYTES": + return UnitFormat.UNIT_KILOBYTES; + case 15: + case "UNIT_MEGABYTES": + return UnitFormat.UNIT_MEGABYTES; + case 16: + case "UNIT_GIGABYTES": + return UnitFormat.UNIT_GIGABYTES; + case 17: + case "UNIT_TERABYTES": + return UnitFormat.UNIT_TERABYTES; + case 18: + case "UNIT_PETABYTES": + return UnitFormat.UNIT_PETABYTES; + case 19: + case "UNIT_EXABYTES": + return UnitFormat.UNIT_EXABYTES; + case 20: + case "UNIT_BITS_IEC": + return UnitFormat.UNIT_BITS_IEC; + case 21: + case "UNIT_BYTES_IEC": + return UnitFormat.UNIT_BYTES_IEC; + case 22: + case "UNIT_KIBIBYTES": + return UnitFormat.UNIT_KIBIBYTES; + case 23: + case "UNIT_MEBIBYTES": + return UnitFormat.UNIT_MEBIBYTES; + case 24: + case "UNIT_GIBIBYTES": + return UnitFormat.UNIT_GIBIBYTES; + case 25: + case "UNIT_TEBIBYTES": + return UnitFormat.UNIT_TEBIBYTES; + case 26: + case "UNIT_PEBIBYTES": + return UnitFormat.UNIT_PEBIBYTES; + case 27: + case "UNIT_EXBIBYTES": + return UnitFormat.UNIT_EXBIBYTES; + case 28: + case "UNIT_REQUESTS_PER_SECOND": + return UnitFormat.UNIT_REQUESTS_PER_SECOND; + case 29: + case 
"UNIT_OPERATIONS_PER_SECOND": + return UnitFormat.UNIT_OPERATIONS_PER_SECOND; + case 30: + case "UNIT_WRITES_PER_SECOND": + return UnitFormat.UNIT_WRITES_PER_SECOND; + case 31: + case "UNIT_READS_PER_SECOND": + return UnitFormat.UNIT_READS_PER_SECOND; + case 32: + case "UNIT_PACKETS_PER_SECOND": + return UnitFormat.UNIT_PACKETS_PER_SECOND; + case 33: + case "UNIT_IO_OPERATIONS_PER_SECOND": + return UnitFormat.UNIT_IO_OPERATIONS_PER_SECOND; + case 34: + case "UNIT_COUNTS_PER_SECOND": + return UnitFormat.UNIT_COUNTS_PER_SECOND; + case 35: + case "UNIT_BITS_SI_PER_SECOND": + return UnitFormat.UNIT_BITS_SI_PER_SECOND; + case 36: + case "UNIT_BYTES_SI_PER_SECOND": + return UnitFormat.UNIT_BYTES_SI_PER_SECOND; + case 37: + case "UNIT_KILOBITS_PER_SECOND": + return UnitFormat.UNIT_KILOBITS_PER_SECOND; + case 38: + case "UNIT_KILOBYTES_PER_SECOND": + return UnitFormat.UNIT_KILOBYTES_PER_SECOND; + case 39: + case "UNIT_MEGABITS_PER_SECOND": + return UnitFormat.UNIT_MEGABITS_PER_SECOND; + case 40: + case "UNIT_MEGABYTES_PER_SECOND": + return UnitFormat.UNIT_MEGABYTES_PER_SECOND; + case 41: + case "UNIT_GIGABITS_PER_SECOND": + return UnitFormat.UNIT_GIGABITS_PER_SECOND; + case 42: + case "UNIT_GIGABYTES_PER_SECOND": + return UnitFormat.UNIT_GIGABYTES_PER_SECOND; + case 43: + case "UNIT_TERABITS_PER_SECOND": + return UnitFormat.UNIT_TERABITS_PER_SECOND; + case 44: + case "UNIT_TERABYTES_PER_SECOND": + return UnitFormat.UNIT_TERABYTES_PER_SECOND; + case 45: + case "UNIT_PETABITS_PER_SECOND": + return UnitFormat.UNIT_PETABITS_PER_SECOND; + case 46: + case "UNIT_PETABYTES_PER_SECOND": + return UnitFormat.UNIT_PETABYTES_PER_SECOND; + case 47: + case "UNIT_BITS_IEC_PER_SECOND": + return UnitFormat.UNIT_BITS_IEC_PER_SECOND; + case 48: + case "UNIT_BYTES_IEC_PER_SECOND": + return UnitFormat.UNIT_BYTES_IEC_PER_SECOND; + case 49: + case "UNIT_KIBIBITS_PER_SECOND": + return UnitFormat.UNIT_KIBIBITS_PER_SECOND; + case 50: + case "UNIT_KIBIBYTES_PER_SECOND": + return UnitFormat.UNIT_KIBIBYTES_PER_SECOND; + case 51: + case "UNIT_MEBIBITS_PER_SECOND": + return UnitFormat.UNIT_MEBIBITS_PER_SECOND; + case 52: + case "UNIT_MEBIBYTES_PER_SECOND": + return UnitFormat.UNIT_MEBIBYTES_PER_SECOND; + case 53: + case "UNIT_GIBIBITS_PER_SECOND": + return UnitFormat.UNIT_GIBIBITS_PER_SECOND; + case 54: + case "UNIT_GIBIBYTES_PER_SECOND": + return UnitFormat.UNIT_GIBIBYTES_PER_SECOND; + case 55: + case "UNIT_TEBIBITS_PER_SECOND": + return UnitFormat.UNIT_TEBIBITS_PER_SECOND; + case 56: + case "UNIT_TEBIBYTES_PER_SECOND": + return UnitFormat.UNIT_TEBIBYTES_PER_SECOND; + case 57: + case "UNIT_PEBIBITS_PER_SECOND": + return UnitFormat.UNIT_PEBIBITS_PER_SECOND; + case 58: + case "UNIT_PEBIBYTES_PER_SECOND": + return UnitFormat.UNIT_PEBIBYTES_PER_SECOND; + case 59: + case "UNIT_DATETIME_UTC": + return UnitFormat.UNIT_DATETIME_UTC; + case 60: + case "UNIT_DATETIME_LOCAL": + return UnitFormat.UNIT_DATETIME_LOCAL; + case 61: + case "UNIT_HERTZ": + return UnitFormat.UNIT_HERTZ; + case 62: + case "UNIT_KILOHERTZ": + return UnitFormat.UNIT_KILOHERTZ; + case 63: + case "UNIT_MEGAHERTZ": + return UnitFormat.UNIT_MEGAHERTZ; + case 64: + case "UNIT_GIGAHERTZ": + return UnitFormat.UNIT_GIGAHERTZ; + case 65: + case "UNIT_DOLLAR": + return UnitFormat.UNIT_DOLLAR; + case 66: + case "UNIT_EURO": + return UnitFormat.UNIT_EURO; + case 67: + case "UNIT_ROUBLE": + return UnitFormat.UNIT_ROUBLE; + case 68: + case "UNIT_CELSIUS": + return UnitFormat.UNIT_CELSIUS; + case 69: + case "UNIT_FAHRENHEIT": + return UnitFormat.UNIT_FAHRENHEIT; + case 70: + case 
"UNIT_KELVIN": + return UnitFormat.UNIT_KELVIN; + case 71: + case "UNIT_FLOP_PER_SECOND": + return UnitFormat.UNIT_FLOP_PER_SECOND; + case 72: + case "UNIT_KILOFLOP_PER_SECOND": + return UnitFormat.UNIT_KILOFLOP_PER_SECOND; + case 73: + case "UNIT_MEGAFLOP_PER_SECOND": + return UnitFormat.UNIT_MEGAFLOP_PER_SECOND; + case 74: + case "UNIT_GIGAFLOP_PER_SECOND": + return UnitFormat.UNIT_GIGAFLOP_PER_SECOND; + case 75: + case "UNIT_PETAFLOP_PER_SECOND": + return UnitFormat.UNIT_PETAFLOP_PER_SECOND; + case 76: + case "UNIT_EXAFLOP_PER_SECOND": + return UnitFormat.UNIT_EXAFLOP_PER_SECOND; + case 77: + case "UNIT_METERS_PER_SECOND": + return UnitFormat.UNIT_METERS_PER_SECOND; + case 78: + case "UNIT_KILOMETERS_PER_HOUR": + return UnitFormat.UNIT_KILOMETERS_PER_HOUR; + case 79: + case "UNIT_MILES_PER_HOUR": + return UnitFormat.UNIT_MILES_PER_HOUR; + case 80: + case "UNIT_MILLIMETER": + return UnitFormat.UNIT_MILLIMETER; + case 81: + case "UNIT_CENTIMETER": + return UnitFormat.UNIT_CENTIMETER; + case 82: + case "UNIT_METER": + return UnitFormat.UNIT_METER; + case 83: + case "UNIT_KILOMETER": + return UnitFormat.UNIT_KILOMETER; + case 84: + case "UNIT_MILE": + return UnitFormat.UNIT_MILE; + case 85: + case "UNIT_PPM": + return UnitFormat.UNIT_PPM; + case 86: + case "UNIT_EVENTS_PER_SECOND": + return UnitFormat.UNIT_EVENTS_PER_SECOND; + case 87: + case "UNIT_PACKETS": + return UnitFormat.UNIT_PACKETS; + case 88: + case "UNIT_DBM": + return UnitFormat.UNIT_DBM; + case 89: + case "UNIT_VIRTUAL_CPU": + return UnitFormat.UNIT_VIRTUAL_CPU; + case 90: + case "UNIT_MESSAGES_PER_SECOND": + return UnitFormat.UNIT_MESSAGES_PER_SECOND; + case -1: + case "UNRECOGNIZED": + default: + return UnitFormat.UNRECOGNIZED; + } +} + +export function unitFormatToJSON(object: UnitFormat): string { + switch (object) { + case UnitFormat.UNIT_FORMAT_UNSPECIFIED: + return "UNIT_FORMAT_UNSPECIFIED"; + case UnitFormat.UNIT_NONE: + return "UNIT_NONE"; + case UnitFormat.UNIT_COUNT: + return "UNIT_COUNT"; + case UnitFormat.UNIT_PERCENT: + return "UNIT_PERCENT"; + case UnitFormat.UNIT_PERCENT_UNIT: + return "UNIT_PERCENT_UNIT"; + case UnitFormat.UNIT_NANOSECONDS: + return "UNIT_NANOSECONDS"; + case UnitFormat.UNIT_MICROSECONDS: + return "UNIT_MICROSECONDS"; + case UnitFormat.UNIT_MILLISECONDS: + return "UNIT_MILLISECONDS"; + case UnitFormat.UNIT_SECONDS: + return "UNIT_SECONDS"; + case UnitFormat.UNIT_MINUTES: + return "UNIT_MINUTES"; + case UnitFormat.UNIT_HOURS: + return "UNIT_HOURS"; + case UnitFormat.UNIT_DAYS: + return "UNIT_DAYS"; + case UnitFormat.UNIT_BITS_SI: + return "UNIT_BITS_SI"; + case UnitFormat.UNIT_BYTES_SI: + return "UNIT_BYTES_SI"; + case UnitFormat.UNIT_KILOBYTES: + return "UNIT_KILOBYTES"; + case UnitFormat.UNIT_MEGABYTES: + return "UNIT_MEGABYTES"; + case UnitFormat.UNIT_GIGABYTES: + return "UNIT_GIGABYTES"; + case UnitFormat.UNIT_TERABYTES: + return "UNIT_TERABYTES"; + case UnitFormat.UNIT_PETABYTES: + return "UNIT_PETABYTES"; + case UnitFormat.UNIT_EXABYTES: + return "UNIT_EXABYTES"; + case UnitFormat.UNIT_BITS_IEC: + return "UNIT_BITS_IEC"; + case UnitFormat.UNIT_BYTES_IEC: + return "UNIT_BYTES_IEC"; + case UnitFormat.UNIT_KIBIBYTES: + return "UNIT_KIBIBYTES"; + case UnitFormat.UNIT_MEBIBYTES: + return "UNIT_MEBIBYTES"; + case UnitFormat.UNIT_GIBIBYTES: + return "UNIT_GIBIBYTES"; + case UnitFormat.UNIT_TEBIBYTES: + return "UNIT_TEBIBYTES"; + case UnitFormat.UNIT_PEBIBYTES: + return "UNIT_PEBIBYTES"; + case UnitFormat.UNIT_EXBIBYTES: + return "UNIT_EXBIBYTES"; + case UnitFormat.UNIT_REQUESTS_PER_SECOND: + 
return "UNIT_REQUESTS_PER_SECOND"; + case UnitFormat.UNIT_OPERATIONS_PER_SECOND: + return "UNIT_OPERATIONS_PER_SECOND"; + case UnitFormat.UNIT_WRITES_PER_SECOND: + return "UNIT_WRITES_PER_SECOND"; + case UnitFormat.UNIT_READS_PER_SECOND: + return "UNIT_READS_PER_SECOND"; + case UnitFormat.UNIT_PACKETS_PER_SECOND: + return "UNIT_PACKETS_PER_SECOND"; + case UnitFormat.UNIT_IO_OPERATIONS_PER_SECOND: + return "UNIT_IO_OPERATIONS_PER_SECOND"; + case UnitFormat.UNIT_COUNTS_PER_SECOND: + return "UNIT_COUNTS_PER_SECOND"; + case UnitFormat.UNIT_BITS_SI_PER_SECOND: + return "UNIT_BITS_SI_PER_SECOND"; + case UnitFormat.UNIT_BYTES_SI_PER_SECOND: + return "UNIT_BYTES_SI_PER_SECOND"; + case UnitFormat.UNIT_KILOBITS_PER_SECOND: + return "UNIT_KILOBITS_PER_SECOND"; + case UnitFormat.UNIT_KILOBYTES_PER_SECOND: + return "UNIT_KILOBYTES_PER_SECOND"; + case UnitFormat.UNIT_MEGABITS_PER_SECOND: + return "UNIT_MEGABITS_PER_SECOND"; + case UnitFormat.UNIT_MEGABYTES_PER_SECOND: + return "UNIT_MEGABYTES_PER_SECOND"; + case UnitFormat.UNIT_GIGABITS_PER_SECOND: + return "UNIT_GIGABITS_PER_SECOND"; + case UnitFormat.UNIT_GIGABYTES_PER_SECOND: + return "UNIT_GIGABYTES_PER_SECOND"; + case UnitFormat.UNIT_TERABITS_PER_SECOND: + return "UNIT_TERABITS_PER_SECOND"; + case UnitFormat.UNIT_TERABYTES_PER_SECOND: + return "UNIT_TERABYTES_PER_SECOND"; + case UnitFormat.UNIT_PETABITS_PER_SECOND: + return "UNIT_PETABITS_PER_SECOND"; + case UnitFormat.UNIT_PETABYTES_PER_SECOND: + return "UNIT_PETABYTES_PER_SECOND"; + case UnitFormat.UNIT_BITS_IEC_PER_SECOND: + return "UNIT_BITS_IEC_PER_SECOND"; + case UnitFormat.UNIT_BYTES_IEC_PER_SECOND: + return "UNIT_BYTES_IEC_PER_SECOND"; + case UnitFormat.UNIT_KIBIBITS_PER_SECOND: + return "UNIT_KIBIBITS_PER_SECOND"; + case UnitFormat.UNIT_KIBIBYTES_PER_SECOND: + return "UNIT_KIBIBYTES_PER_SECOND"; + case UnitFormat.UNIT_MEBIBITS_PER_SECOND: + return "UNIT_MEBIBITS_PER_SECOND"; + case UnitFormat.UNIT_MEBIBYTES_PER_SECOND: + return "UNIT_MEBIBYTES_PER_SECOND"; + case UnitFormat.UNIT_GIBIBITS_PER_SECOND: + return "UNIT_GIBIBITS_PER_SECOND"; + case UnitFormat.UNIT_GIBIBYTES_PER_SECOND: + return "UNIT_GIBIBYTES_PER_SECOND"; + case UnitFormat.UNIT_TEBIBITS_PER_SECOND: + return "UNIT_TEBIBITS_PER_SECOND"; + case UnitFormat.UNIT_TEBIBYTES_PER_SECOND: + return "UNIT_TEBIBYTES_PER_SECOND"; + case UnitFormat.UNIT_PEBIBITS_PER_SECOND: + return "UNIT_PEBIBITS_PER_SECOND"; + case UnitFormat.UNIT_PEBIBYTES_PER_SECOND: + return "UNIT_PEBIBYTES_PER_SECOND"; + case UnitFormat.UNIT_DATETIME_UTC: + return "UNIT_DATETIME_UTC"; + case UnitFormat.UNIT_DATETIME_LOCAL: + return "UNIT_DATETIME_LOCAL"; + case UnitFormat.UNIT_HERTZ: + return "UNIT_HERTZ"; + case UnitFormat.UNIT_KILOHERTZ: + return "UNIT_KILOHERTZ"; + case UnitFormat.UNIT_MEGAHERTZ: + return "UNIT_MEGAHERTZ"; + case UnitFormat.UNIT_GIGAHERTZ: + return "UNIT_GIGAHERTZ"; + case UnitFormat.UNIT_DOLLAR: + return "UNIT_DOLLAR"; + case UnitFormat.UNIT_EURO: + return "UNIT_EURO"; + case UnitFormat.UNIT_ROUBLE: + return "UNIT_ROUBLE"; + case UnitFormat.UNIT_CELSIUS: + return "UNIT_CELSIUS"; + case UnitFormat.UNIT_FAHRENHEIT: + return "UNIT_FAHRENHEIT"; + case UnitFormat.UNIT_KELVIN: + return "UNIT_KELVIN"; + case UnitFormat.UNIT_FLOP_PER_SECOND: + return "UNIT_FLOP_PER_SECOND"; + case UnitFormat.UNIT_KILOFLOP_PER_SECOND: + return "UNIT_KILOFLOP_PER_SECOND"; + case UnitFormat.UNIT_MEGAFLOP_PER_SECOND: + return "UNIT_MEGAFLOP_PER_SECOND"; + case UnitFormat.UNIT_GIGAFLOP_PER_SECOND: + return "UNIT_GIGAFLOP_PER_SECOND"; + case UnitFormat.UNIT_PETAFLOP_PER_SECOND: + 
return "UNIT_PETAFLOP_PER_SECOND"; + case UnitFormat.UNIT_EXAFLOP_PER_SECOND: + return "UNIT_EXAFLOP_PER_SECOND"; + case UnitFormat.UNIT_METERS_PER_SECOND: + return "UNIT_METERS_PER_SECOND"; + case UnitFormat.UNIT_KILOMETERS_PER_HOUR: + return "UNIT_KILOMETERS_PER_HOUR"; + case UnitFormat.UNIT_MILES_PER_HOUR: + return "UNIT_MILES_PER_HOUR"; + case UnitFormat.UNIT_MILLIMETER: + return "UNIT_MILLIMETER"; + case UnitFormat.UNIT_CENTIMETER: + return "UNIT_CENTIMETER"; + case UnitFormat.UNIT_METER: + return "UNIT_METER"; + case UnitFormat.UNIT_KILOMETER: + return "UNIT_KILOMETER"; + case UnitFormat.UNIT_MILE: + return "UNIT_MILE"; + case UnitFormat.UNIT_PPM: + return "UNIT_PPM"; + case UnitFormat.UNIT_EVENTS_PER_SECOND: + return "UNIT_EVENTS_PER_SECOND"; + case UnitFormat.UNIT_PACKETS: + return "UNIT_PACKETS"; + case UnitFormat.UNIT_DBM: + return "UNIT_DBM"; + case UnitFormat.UNIT_VIRTUAL_CPU: + return "UNIT_VIRTUAL_CPU"; + case UnitFormat.UNIT_MESSAGES_PER_SECOND: + return "UNIT_MESSAGES_PER_SECOND"; + default: + return "UNKNOWN"; + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/monitoring/v3/widget.ts b/src/generated/yandex/cloud/monitoring/v3/widget.ts new file mode 100644 index 00000000..3b506512 --- /dev/null +++ b/src/generated/yandex/cloud/monitoring/v3/widget.ts @@ -0,0 +1,302 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { TextWidget } from "../../../../yandex/cloud/monitoring/v3/text_widget"; +import { TitleWidget } from "../../../../yandex/cloud/monitoring/v3/title_widget"; +import { ChartWidget } from "../../../../yandex/cloud/monitoring/v3/chart_widget"; + +export const protobufPackage = "yandex.cloud.monitoring.v3"; + +/** Widget. */ +export interface Widget { + $type: "yandex.cloud.monitoring.v3.Widget"; + /** Required. Widget layout position. */ + position?: Widget_LayoutPosition; + /** Text widget. */ + text?: TextWidget | undefined; + /** Title widget. */ + title?: TitleWidget | undefined; + /** Chart widget. */ + chart?: ChartWidget | undefined; +} + +/** Layout item for widget item positioning. */ +export interface Widget_LayoutPosition { + $type: "yandex.cloud.monitoring.v3.Widget.LayoutPosition"; + /** Required. X-axis top-left corner coordinate. */ + x: number; + /** Required. Y-axis top-left corner coordinate. */ + y: number; + /** Required. Weight. */ + w: number; + /** Required. Height. */ + h: number; +} + +const baseWidget: object = { $type: "yandex.cloud.monitoring.v3.Widget" }; + +export const Widget = { + $type: "yandex.cloud.monitoring.v3.Widget" as const, + + encode( + message: Widget, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.position !== undefined) { + Widget_LayoutPosition.encode( + message.position, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.text !== undefined) { + TextWidget.encode(message.text, writer.uint32(18).fork()).ldelim(); + } + if (message.title !== undefined) { + TitleWidget.encode(message.title, writer.uint32(26).fork()).ldelim(); + } + if (message.chart !== undefined) { + ChartWidget.encode(message.chart, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Widget { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseWidget } as Widget; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.position = Widget_LayoutPosition.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.text = TextWidget.decode(reader, reader.uint32()); + break; + case 3: + message.title = TitleWidget.decode(reader, reader.uint32()); + break; + case 5: + message.chart = ChartWidget.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Widget { + const message = { ...baseWidget } as Widget; + message.position = + object.position !== undefined && object.position !== null + ? Widget_LayoutPosition.fromJSON(object.position) + : undefined; + message.text = + object.text !== undefined && object.text !== null + ? TextWidget.fromJSON(object.text) + : undefined; + message.title = + object.title !== undefined && object.title !== null + ? TitleWidget.fromJSON(object.title) + : undefined; + message.chart = + object.chart !== undefined && object.chart !== null + ? ChartWidget.fromJSON(object.chart) + : undefined; + return message; + }, + + toJSON(message: Widget): unknown { + const obj: any = {}; + message.position !== undefined && + (obj.position = message.position + ? Widget_LayoutPosition.toJSON(message.position) + : undefined); + message.text !== undefined && + (obj.text = message.text ? TextWidget.toJSON(message.text) : undefined); + message.title !== undefined && + (obj.title = message.title + ? TitleWidget.toJSON(message.title) + : undefined); + message.chart !== undefined && + (obj.chart = message.chart + ? ChartWidget.toJSON(message.chart) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Widget { + const message = { ...baseWidget } as Widget; + message.position = + object.position !== undefined && object.position !== null + ? Widget_LayoutPosition.fromPartial(object.position) + : undefined; + message.text = + object.text !== undefined && object.text !== null + ? TextWidget.fromPartial(object.text) + : undefined; + message.title = + object.title !== undefined && object.title !== null + ? TitleWidget.fromPartial(object.title) + : undefined; + message.chart = + object.chart !== undefined && object.chart !== null + ? ChartWidget.fromPartial(object.chart) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Widget.$type, Widget); + +const baseWidget_LayoutPosition: object = { + $type: "yandex.cloud.monitoring.v3.Widget.LayoutPosition", + x: 0, + y: 0, + w: 0, + h: 0, +}; + +export const Widget_LayoutPosition = { + $type: "yandex.cloud.monitoring.v3.Widget.LayoutPosition" as const, + + encode( + message: Widget_LayoutPosition, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.x !== 0) { + writer.uint32(8).int64(message.x); + } + if (message.y !== 0) { + writer.uint32(16).int64(message.y); + } + if (message.w !== 0) { + writer.uint32(24).int64(message.w); + } + if (message.h !== 0) { + writer.uint32(32).int64(message.h); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Widget_LayoutPosition { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseWidget_LayoutPosition } as Widget_LayoutPosition; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.x = longToNumber(reader.int64() as Long); + break; + case 2: + message.y = longToNumber(reader.int64() as Long); + break; + case 3: + message.w = longToNumber(reader.int64() as Long); + break; + case 4: + message.h = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Widget_LayoutPosition { + const message = { ...baseWidget_LayoutPosition } as Widget_LayoutPosition; + message.x = + object.x !== undefined && object.x !== null ? Number(object.x) : 0; + message.y = + object.y !== undefined && object.y !== null ? Number(object.y) : 0; + message.w = + object.w !== undefined && object.w !== null ? Number(object.w) : 0; + message.h = + object.h !== undefined && object.h !== null ? Number(object.h) : 0; + return message; + }, + + toJSON(message: Widget_LayoutPosition): unknown { + const obj: any = {}; + message.x !== undefined && (obj.x = Math.round(message.x)); + message.y !== undefined && (obj.y = Math.round(message.y)); + message.w !== undefined && (obj.w = Math.round(message.w)); + message.h !== undefined && (obj.h = Math.round(message.h)); + return obj; + }, + + fromPartial, I>>( + object: I + ): Widget_LayoutPosition { + const message = { ...baseWidget_LayoutPosition } as Widget_LayoutPosition; + message.x = object.x ?? 0; + message.y = object.y ?? 0; + message.w = object.w ?? 0; + message.h = object.h ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Widget_LayoutPosition.$type, Widget_LayoutPosition); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/operation/operation_service.ts b/src/generated/yandex/cloud/operation/operation_service.ts index 18ca3a71..f95834b9 100644 --- a/src/generated/yandex/cloud/operation/operation_service.ts +++ b/src/generated/yandex/cloud/operation/operation_service.ts @@ -176,7 +176,7 @@ export const OperationServiceService = { /** * Cancels the specified operation. * - * Note that currently Yandex Object Storage API does not support cancelling operations. + * Note that currently Object Storage API does not support cancelling operations. 
*/ cancel: { path: "/yandex.cloud.operation.OperationService/Cancel", @@ -197,7 +197,7 @@ export interface OperationServiceServer extends UntypedServiceImplementation { /** * Cancels the specified operation. * - * Note that currently Yandex Object Storage API does not support cancelling operations. + * Note that currently Object Storage API does not support cancelling operations. */ cancel: handleUnaryCall; } @@ -222,7 +222,7 @@ export interface OperationServiceClient extends Client { /** * Cancels the specified operation. * - * Note that currently Yandex Object Storage API does not support cancelling operations. + * Note that currently Object Storage API does not support cancelling operations. */ cancel( request: CancelOperationRequest, diff --git a/src/generated/yandex/cloud/organizationmanager/index.ts b/src/generated/yandex/cloud/organizationmanager/index.ts index 0308023f..89935ab8 100644 --- a/src/generated/yandex/cloud/organizationmanager/index.ts +++ b/src/generated/yandex/cloud/organizationmanager/index.ts @@ -1,3 +1,5 @@ +export * as group from './v1/group' +export * as group_service from './v1/group_service' export * as organization from './v1/organization' export * as organization_service from './v1/organization_service' export * as user_account from './v1/user_account' diff --git a/src/generated/yandex/cloud/organizationmanager/v1/group.ts b/src/generated/yandex/cloud/organizationmanager/v1/group.ts new file mode 100644 index 00000000..1f2cc1d7 --- /dev/null +++ b/src/generated/yandex/cloud/organizationmanager/v1/group.ts @@ -0,0 +1,193 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.organizationmanager.v1"; + +/** + * A Group resource. + * For more information, see [Groups](/docs/organization/groups). + */ +export interface Group { + $type: "yandex.cloud.organizationmanager.v1.Group"; + /** ID of the group. */ + id: string; + /** ID of the organization that the group belongs to. */ + organizationId: string; + /** Creation timestamp. */ + createdAt?: Date; + /** Name of the group. */ + name: string; + /** Description of the group. */ + description: string; +} + +const baseGroup: object = { + $type: "yandex.cloud.organizationmanager.v1.Group", + id: "", + organizationId: "", + name: "", + description: "", +}; + +export const Group = { + $type: "yandex.cloud.organizationmanager.v1.Group" as const, + + encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.organizationId !== "") { + writer.uint32(18).string(message.organizationId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.name !== "") { + writer.uint32(34).string(message.name); + } + if (message.description !== "") { + writer.uint32(42).string(message.description); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Group { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGroup } as Group; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.organizationId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.name = reader.string(); + break; + case 5: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Group { + const message = { ...baseGroup } as Group; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? String(object.organizationId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + return message; + }, + + toJSON(message: Group): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + return obj; + }, + + fromPartial, I>>(object: I): Group { + const message = { ...baseGroup } as Group; + message.id = object.id ?? ""; + message.organizationId = object.organizationId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Group.$type, Group); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/organizationmanager/v1/group_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/group_service.ts new file mode 100644 index 00000000..d03f407b --- /dev/null +++ b/src/generated/yandex/cloud/organizationmanager/v1/group_service.ts @@ -0,0 +1,2155 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { FieldMask } from "../../../../google/protobuf/field_mask"; +import { Group } from "../../../../yandex/cloud/organizationmanager/v1/group"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; +import { + ListAccessBindingsRequest, + ListAccessBindingsResponse, + SetAccessBindingsRequest, + UpdateAccessBindingsRequest, +} from "../../../../yandex/cloud/access/access"; + +export const protobufPackage = "yandex.cloud.organizationmanager.v1"; + +export interface GetGroupRequest { + $type: "yandex.cloud.organizationmanager.v1.GetGroupRequest"; + /** + * ID of the Group resource to return. + * To get the group ID, use a [GroupService.List] request. + */ + groupId: string; +} + +export interface ListGroupsRequest { + $type: "yandex.cloud.organizationmanager.v1.ListGroupsRequest"; + /** + * ID of the organization to list groups in. + * To get the organization ID, use a [yandex.cloud.organizationmanager.v1.OrganizationService.List] request. + */ + organizationId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], + * the service returns a [ListGroupsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. Set [page_token] + * to the [ListGroupsResponse.next_page_token] + * returned by a previous list request to get the next page of results. + */ + pageToken: string; + /** + * A filter expression that filters resources listed in the response. + * The expression must specify: + * 1. The field name. Currently you can use filtering only on the [Group.name] field. + * 2. An `=` operator. + * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. + */ + filter: string; +} + +export interface ListGroupsResponse { + $type: "yandex.cloud.organizationmanager.v1.ListGroupsResponse"; + /** List of Group resources. 
*/ + groups: Group[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListGroupsRequest.page_size], use + * the [next_page_token] as the value + * for the [ListGroupsRequest.page_token] query parameter + * in the next list request. Each subsequent list request will have its own + * [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface CreateGroupRequest { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupRequest"; + /** + * ID of the organization to create a group in. + * To get the organization ID, use a [yandex.cloud.organizationmanager.v1.OrganizationService.List] request. + */ + organizationId: string; + /** + * Name of the group. + * The name must be unique within the organization. + */ + name: string; + /** Description of the group. */ + description: string; +} + +export interface CreateGroupMetadata { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMetadata"; + /** ID of the group that is being created. */ + groupId: string; +} + +export interface UpdateGroupRequest { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupRequest"; + /** + * ID of the Group resource to update. + * To get the group ID, use a [GroupService.List] request. + */ + groupId: string; + /** Field mask that specifies which fields of the Group resource are going to be updated. */ + updateMask?: FieldMask; + /** + * Name of the group. + * The name must be unique within the organization. + */ + name: string; + /** Description of the group. */ + description: string; +} + +export interface UpdateGroupMetadata { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMetadata"; + /** ID of the Group resource that is being updated. */ + groupId: string; +} + +export interface DeleteGroupRequest { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupRequest"; + /** + * ID of the group to delete. + * To get the group ID, use a [GroupService.List] request. + */ + groupId: string; +} + +export interface DeleteGroupMetadata { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMetadata"; + /** ID of the group that is being deleted. */ + groupId: string; +} + +export interface ListGroupOperationsRequest { + $type: "yandex.cloud.organizationmanager.v1.ListGroupOperationsRequest"; + /** ID of the Group resource to list operations for. */ + groupId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListGroupOperationsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Default value: 100. + */ + pageSize: number; + /** + * Page token. Set [page_token] + * to the [ListGroupOperationsResponse.next_page_token] + * returned by a previous list request to get the next page of results. + */ + pageToken: string; +} + +export interface ListGroupOperationsResponse { + $type: "yandex.cloud.organizationmanager.v1.ListGroupOperationsResponse"; + /** List of operations for the specified group. */ + operations: Operation[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListGroupOperationsRequest.page_size], use the [next_page_token] as the value + * for the [ListGroupOperationsRequest.page_token] query parameter in the next list request. 
+ * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface ListGroupMembersRequest { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMembersRequest"; + /** ID of the Group resource to list members for. */ + groupId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListGroupMembersResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set [page_token] + * to the [ListGroupMembersResponse.next_page_token] + * returned by a previous list request to get the next page of results. + */ + pageToken: string; +} + +export interface ListGroupMembersResponse { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMembersResponse"; + /** List of members for the specified group. */ + members: GroupMember[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListGroupMembersRequest.page_size], use the [next_page_token] as the value + * for the [ListGroupMembersRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface GroupMember { + $type: "yandex.cloud.organizationmanager.v1.GroupMember"; + /** ID of the subject. */ + subjectId: string; + /** + * Type of the subject. + * + * It can contain one of the following values: + * * `userAccount`: An account on Yandex, added to Yandex Cloud. + * * `federatedUser`: A federated account. This type represents a user from an identity federation, like Active Directory. + */ + subjectType: string; +} + +export interface UpdateGroupMembersRequest { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMembersRequest"; + /** + * ID of the group to update. + * To get the group ID, use a [GroupService.List] request. + */ + groupId: string; + /** Updates to group members. */ + memberDeltas: MemberDelta[]; +} + +export interface UpdateGroupMembersMetadata { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMembersMetadata"; + /** ID of the group that is being updated. */ + groupId: string; +} + +export interface MemberDelta { + $type: "yandex.cloud.organizationmanager.v1.MemberDelta"; + /** The action that is being performed on a group member. */ + action: MemberDelta_MemberAction; + /** + * ID of the subject that is being added or removed from a group. + * + * Subject type can be one of following values: + * * `userAccount`: An account on Yandex, added to Yandex Cloud. + * * `federatedUser`: A federated account. This type represents a user from an identity federation, like Active Directory. + */ + subjectId: string; +} + +export enum MemberDelta_MemberAction { + MEMBER_ACTION_UNSPECIFIED = 0, + /** ADD - Addition of a group member. */ + ADD = 1, + /** REMOVE - Removal of a group member. 
*/ + REMOVE = 2, + UNRECOGNIZED = -1, +} + +export function memberDelta_MemberActionFromJSON( + object: any +): MemberDelta_MemberAction { + switch (object) { + case 0: + case "MEMBER_ACTION_UNSPECIFIED": + return MemberDelta_MemberAction.MEMBER_ACTION_UNSPECIFIED; + case 1: + case "ADD": + return MemberDelta_MemberAction.ADD; + case 2: + case "REMOVE": + return MemberDelta_MemberAction.REMOVE; + case -1: + case "UNRECOGNIZED": + default: + return MemberDelta_MemberAction.UNRECOGNIZED; + } +} + +export function memberDelta_MemberActionToJSON( + object: MemberDelta_MemberAction +): string { + switch (object) { + case MemberDelta_MemberAction.MEMBER_ACTION_UNSPECIFIED: + return "MEMBER_ACTION_UNSPECIFIED"; + case MemberDelta_MemberAction.ADD: + return "ADD"; + case MemberDelta_MemberAction.REMOVE: + return "REMOVE"; + default: + return "UNKNOWN"; + } +} + +const baseGetGroupRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.GetGroupRequest", + groupId: "", +}; + +export const GetGroupRequest = { + $type: "yandex.cloud.organizationmanager.v1.GetGroupRequest" as const, + + encode( + message: GetGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetGroupRequest } as GetGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetGroupRequest { + const message = { ...baseGetGroupRequest } as GetGroupRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + return message; + }, + + toJSON(message: GetGroupRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetGroupRequest { + const message = { ...baseGetGroupRequest } as GetGroupRequest; + message.groupId = object.groupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetGroupRequest.$type, GetGroupRequest); + +const baseListGroupsRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupsRequest", + organizationId: "", + pageSize: 0, + pageToken: "", + filter: "", +}; + +export const ListGroupsRequest = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupsRequest" as const, + + encode( + message: ListGroupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.organizationId !== "") { + writer.uint32(10).string(message.organizationId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + if (message.filter !== "") { + writer.uint32(34).string(message.filter); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListGroupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListGroupsRequest } as ListGroupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.organizationId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + case 4: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupsRequest { + const message = { ...baseListGroupsRequest } as ListGroupsRequest; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? String(object.organizationId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListGroupsRequest): unknown { + const obj: any = {}; + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupsRequest { + const message = { ...baseListGroupsRequest } as ListGroupsRequest; + message.organizationId = object.organizationId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.filter = object.filter ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListGroupsRequest.$type, ListGroupsRequest); + +const baseListGroupsResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupsResponse", + nextPageToken: "", +}; + +export const ListGroupsResponse = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupsResponse" as const, + + encode( + message: ListGroupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.groups) { + Group.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListGroupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListGroupsResponse } as ListGroupsResponse; + message.groups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groups.push(Group.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupsResponse { + const message = { ...baseListGroupsResponse } as ListGroupsResponse; + message.groups = (object.groups ?? []).map((e: any) => Group.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? 
String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGroupsResponse): unknown { + const obj: any = {}; + if (message.groups) { + obj.groups = message.groups.map((e) => (e ? Group.toJSON(e) : undefined)); + } else { + obj.groups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupsResponse { + const message = { ...baseListGroupsResponse } as ListGroupsResponse; + message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListGroupsResponse.$type, ListGroupsResponse); + +const baseCreateGroupRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupRequest", + organizationId: "", + name: "", + description: "", +}; + +export const CreateGroupRequest = { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupRequest" as const, + + encode( + message: CreateGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.organizationId !== "") { + writer.uint32(10).string(message.organizationId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateGroupRequest } as CreateGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.organizationId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGroupRequest { + const message = { ...baseCreateGroupRequest } as CreateGroupRequest; + message.organizationId = + object.organizationId !== undefined && object.organizationId !== null + ? String(object.organizationId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + return message; + }, + + toJSON(message: CreateGroupRequest): unknown { + const obj: any = {}; + message.organizationId !== undefined && + (obj.organizationId = message.organizationId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGroupRequest { + const message = { ...baseCreateGroupRequest } as CreateGroupRequest; + message.organizationId = object.organizationId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(CreateGroupRequest.$type, CreateGroupRequest); + +const baseCreateGroupMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMetadata", + groupId: "", +}; + +export const CreateGroupMetadata = { + $type: "yandex.cloud.organizationmanager.v1.CreateGroupMetadata" as const, + + encode( + message: CreateGroupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CreateGroupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateGroupMetadata } as CreateGroupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateGroupMetadata { + const message = { ...baseCreateGroupMetadata } as CreateGroupMetadata; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + return message; + }, + + toJSON(message: CreateGroupMetadata): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateGroupMetadata { + const message = { ...baseCreateGroupMetadata } as CreateGroupMetadata; + message.groupId = object.groupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateGroupMetadata.$type, CreateGroupMetadata); + +const baseUpdateGroupRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupRequest", + groupId: "", + name: "", + description: "", +}; + +export const UpdateGroupRequest = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupRequest" as const, + + encode( + message: UpdateGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateGroupRequest } as UpdateGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupRequest { + const message = { ...baseUpdateGroupRequest } as UpdateGroupRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? 
String(object.groupId) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + return message; + }, + + toJSON(message: UpdateGroupRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupRequest { + const message = { ...baseUpdateGroupRequest } as UpdateGroupRequest; + message.groupId = object.groupId ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateGroupRequest.$type, UpdateGroupRequest); + +const baseUpdateGroupMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMetadata", + groupId: "", +}; + +export const UpdateGroupMetadata = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMetadata" as const, + + encode( + message: UpdateGroupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateGroupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateGroupMetadata } as UpdateGroupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMetadata { + const message = { ...baseUpdateGroupMetadata } as UpdateGroupMetadata; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + return message; + }, + + toJSON(message: UpdateGroupMetadata): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMetadata { + const message = { ...baseUpdateGroupMetadata } as UpdateGroupMetadata; + message.groupId = object.groupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateGroupMetadata.$type, UpdateGroupMetadata); + +const baseDeleteGroupRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupRequest", + groupId: "", +}; + +export const DeleteGroupRequest = { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupRequest" as const, + + encode( + message: DeleteGroupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteGroupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteGroupRequest } as DeleteGroupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGroupRequest { + const message = { ...baseDeleteGroupRequest } as DeleteGroupRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + return message; + }, + + toJSON(message: DeleteGroupRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGroupRequest { + const message = { ...baseDeleteGroupRequest } as DeleteGroupRequest; + message.groupId = object.groupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteGroupRequest.$type, DeleteGroupRequest); + +const baseDeleteGroupMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMetadata", + groupId: "", +}; + +export const DeleteGroupMetadata = { + $type: "yandex.cloud.organizationmanager.v1.DeleteGroupMetadata" as const, + + encode( + message: DeleteGroupMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeleteGroupMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteGroupMetadata } as DeleteGroupMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteGroupMetadata { + const message = { ...baseDeleteGroupMetadata } as DeleteGroupMetadata; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + return message; + }, + + toJSON(message: DeleteGroupMetadata): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteGroupMetadata { + const message = { ...baseDeleteGroupMetadata } as DeleteGroupMetadata; + message.groupId = object.groupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteGroupMetadata.$type, DeleteGroupMetadata); + +const baseListGroupOperationsRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupOperationsRequest", + groupId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListGroupOperationsRequest = { + $type: + "yandex.cloud.organizationmanager.v1.ListGroupOperationsRequest" as const, + + encode( + message: ListGroupOperationsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGroupOperationsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGroupOperationsRequest, + } as ListGroupOperationsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupOperationsRequest { + const message = { + ...baseListGroupOperationsRequest, + } as ListGroupOperationsRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListGroupOperationsRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupOperationsRequest { + const message = { + ...baseListGroupOperationsRequest, + } as ListGroupOperationsRequest; + message.groupId = object.groupId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGroupOperationsRequest.$type, + ListGroupOperationsRequest +); + +const baseListGroupOperationsResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupOperationsResponse", + nextPageToken: "", +}; + +export const ListGroupOperationsResponse = { + $type: + "yandex.cloud.organizationmanager.v1.ListGroupOperationsResponse" as const, + + encode( + message: ListGroupOperationsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.operations) { + Operation.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGroupOperationsResponse { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGroupOperationsResponse, + } as ListGroupOperationsResponse; + message.operations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operations.push(Operation.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupOperationsResponse { + const message = { + ...baseListGroupOperationsResponse, + } as ListGroupOperationsResponse; + message.operations = (object.operations ?? []).map((e: any) => + Operation.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGroupOperationsResponse): unknown { + const obj: any = {}; + if (message.operations) { + obj.operations = message.operations.map((e) => + e ? Operation.toJSON(e) : undefined + ); + } else { + obj.operations = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupOperationsResponse { + const message = { + ...baseListGroupOperationsResponse, + } as ListGroupOperationsResponse; + message.operations = + object.operations?.map((e) => Operation.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGroupOperationsResponse.$type, + ListGroupOperationsResponse +); + +const baseListGroupMembersRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMembersRequest", + groupId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListGroupMembersRequest = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMembersRequest" as const, + + encode( + message: ListGroupMembersRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGroupMembersRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGroupMembersRequest, + } as ListGroupMembersRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupMembersRequest { + const message = { + ...baseListGroupMembersRequest, + } as ListGroupMembersRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListGroupMembersRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupMembersRequest { + const message = { + ...baseListGroupMembersRequest, + } as ListGroupMembersRequest; + message.groupId = object.groupId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListGroupMembersRequest.$type, ListGroupMembersRequest); + +const baseListGroupMembersResponse: object = { + $type: "yandex.cloud.organizationmanager.v1.ListGroupMembersResponse", + nextPageToken: "", +}; + +export const ListGroupMembersResponse = { + $type: + "yandex.cloud.organizationmanager.v1.ListGroupMembersResponse" as const, + + encode( + message: ListGroupMembersResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.members) { + GroupMember.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListGroupMembersResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListGroupMembersResponse, + } as ListGroupMembersResponse; + message.members = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.members.push(GroupMember.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListGroupMembersResponse { + const message = { + ...baseListGroupMembersResponse, + } as ListGroupMembersResponse; + message.members = (object.members ?? []).map((e: any) => + GroupMember.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListGroupMembersResponse): unknown { + const obj: any = {}; + if (message.members) { + obj.members = message.members.map((e) => + e ? GroupMember.toJSON(e) : undefined + ); + } else { + obj.members = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListGroupMembersResponse { + const message = { + ...baseListGroupMembersResponse, + } as ListGroupMembersResponse; + message.members = + object.members?.map((e) => GroupMember.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListGroupMembersResponse.$type, + ListGroupMembersResponse +); + +const baseGroupMember: object = { + $type: "yandex.cloud.organizationmanager.v1.GroupMember", + subjectId: "", + subjectType: "", +}; + +export const GroupMember = { + $type: "yandex.cloud.organizationmanager.v1.GroupMember" as const, + + encode( + message: GroupMember, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.subjectId !== "") { + writer.uint32(10).string(message.subjectId); + } + if (message.subjectType !== "") { + writer.uint32(18).string(message.subjectType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GroupMember { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGroupMember } as GroupMember; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.subjectId = reader.string(); + break; + case 2: + message.subjectType = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GroupMember { + const message = { ...baseGroupMember } as GroupMember; + message.subjectId = + object.subjectId !== undefined && object.subjectId !== null + ? String(object.subjectId) + : ""; + message.subjectType = + object.subjectType !== undefined && object.subjectType !== null + ? String(object.subjectType) + : ""; + return message; + }, + + toJSON(message: GroupMember): unknown { + const obj: any = {}; + message.subjectId !== undefined && (obj.subjectId = message.subjectId); + message.subjectType !== undefined && + (obj.subjectType = message.subjectType); + return obj; + }, + + fromPartial, I>>( + object: I + ): GroupMember { + const message = { ...baseGroupMember } as GroupMember; + message.subjectId = object.subjectId ?? ""; + message.subjectType = object.subjectType ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GroupMember.$type, GroupMember); + +const baseUpdateGroupMembersRequest: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMembersRequest", + groupId: "", +}; + +export const UpdateGroupMembersRequest = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMembersRequest" as const, + + encode( + message: UpdateGroupMembersRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + for (const v of message.memberDeltas) { + MemberDelta.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMembersRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMembersRequest, + } as UpdateGroupMembersRequest; + message.memberDeltas = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + case 2: + message.memberDeltas.push( + MemberDelta.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMembersRequest { + const message = { + ...baseUpdateGroupMembersRequest, + } as UpdateGroupMembersRequest; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + message.memberDeltas = (object.memberDeltas ?? []).map((e: any) => + MemberDelta.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateGroupMembersRequest): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + if (message.memberDeltas) { + obj.memberDeltas = message.memberDeltas.map((e) => + e ? MemberDelta.toJSON(e) : undefined + ); + } else { + obj.memberDeltas = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMembersRequest { + const message = { + ...baseUpdateGroupMembersRequest, + } as UpdateGroupMembersRequest; + message.groupId = object.groupId ?? ""; + message.memberDeltas = + object.memberDeltas?.map((e) => MemberDelta.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMembersRequest.$type, + UpdateGroupMembersRequest +); + +const baseUpdateGroupMembersMetadata: object = { + $type: "yandex.cloud.organizationmanager.v1.UpdateGroupMembersMetadata", + groupId: "", +}; + +export const UpdateGroupMembersMetadata = { + $type: + "yandex.cloud.organizationmanager.v1.UpdateGroupMembersMetadata" as const, + + encode( + message: UpdateGroupMembersMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.groupId !== "") { + writer.uint32(10).string(message.groupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateGroupMembersMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateGroupMembersMetadata, + } as UpdateGroupMembersMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.groupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateGroupMembersMetadata { + const message = { + ...baseUpdateGroupMembersMetadata, + } as UpdateGroupMembersMetadata; + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? String(object.groupId) + : ""; + return message; + }, + + toJSON(message: UpdateGroupMembersMetadata): unknown { + const obj: any = {}; + message.groupId !== undefined && (obj.groupId = message.groupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateGroupMembersMetadata { + const message = { + ...baseUpdateGroupMembersMetadata, + } as UpdateGroupMembersMetadata; + message.groupId = object.groupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateGroupMembersMetadata.$type, + UpdateGroupMembersMetadata +); + +const baseMemberDelta: object = { + $type: "yandex.cloud.organizationmanager.v1.MemberDelta", + action: 0, + subjectId: "", +}; + +export const MemberDelta = { + $type: "yandex.cloud.organizationmanager.v1.MemberDelta" as const, + + encode( + message: MemberDelta, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.action !== 0) { + writer.uint32(8).int32(message.action); + } + if (message.subjectId !== "") { + writer.uint32(18).string(message.subjectId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MemberDelta { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMemberDelta } as MemberDelta; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.action = reader.int32() as any; + break; + case 2: + message.subjectId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MemberDelta { + const message = { ...baseMemberDelta } as MemberDelta; + message.action = + object.action !== undefined && object.action !== null + ? memberDelta_MemberActionFromJSON(object.action) + : 0; + message.subjectId = + object.subjectId !== undefined && object.subjectId !== null + ? String(object.subjectId) + : ""; + return message; + }, + + toJSON(message: MemberDelta): unknown { + const obj: any = {}; + message.action !== undefined && + (obj.action = memberDelta_MemberActionToJSON(message.action)); + message.subjectId !== undefined && (obj.subjectId = message.subjectId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MemberDelta { + const message = { ...baseMemberDelta } as MemberDelta; + message.action = object.action ?? 0; + message.subjectId = object.subjectId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MemberDelta.$type, MemberDelta); + +/** A set of methods for managing groups. */ +export const GroupServiceService = { + /** + * Returns the specified Group resource. + * + * To get the list of available Group resources, make a [List] request. + */ + get: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetGroupRequest) => + Buffer.from(GetGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetGroupRequest.decode(value), + responseSerialize: (value: Group) => + Buffer.from(Group.encode(value).finish()), + responseDeserialize: (value: Buffer) => Group.decode(value), + }, + /** Retrieves the list of group resources. */ + list: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGroupsRequest) => + Buffer.from(ListGroupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListGroupsRequest.decode(value), + responseSerialize: (value: ListGroupsResponse) => + Buffer.from(ListGroupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListGroupsResponse.decode(value), + }, + /** Creates a group in the specified organization. 
*/ + create: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateGroupRequest) => + Buffer.from(CreateGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified group. */ + update: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateGroupRequest) => + Buffer.from(UpdateGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified group. */ + delete: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteGroupRequest) => + Buffer.from(DeleteGroupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteGroupRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists operations for the specified group. */ + listOperations: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/ListOperations", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGroupOperationsRequest) => + Buffer.from(ListGroupOperationsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListGroupOperationsRequest.decode(value), + responseSerialize: (value: ListGroupOperationsResponse) => + Buffer.from(ListGroupOperationsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGroupOperationsResponse.decode(value), + }, + /** List group active members. */ + listMembers: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/ListMembers", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListGroupMembersRequest) => + Buffer.from(ListGroupMembersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListGroupMembersRequest.decode(value), + responseSerialize: (value: ListGroupMembersResponse) => + Buffer.from(ListGroupMembersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListGroupMembersResponse.decode(value), + }, + /** Update group members. */ + updateMembers: { + path: "/yandex.cloud.organizationmanager.v1.GroupService/UpdateMembers", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateGroupMembersRequest) => + Buffer.from(UpdateGroupMembersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateGroupMembersRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Lists access bindings for the specified group. 
*/
+  listAccessBindings: {
+    path: "/yandex.cloud.organizationmanager.v1.GroupService/ListAccessBindings",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: ListAccessBindingsRequest) =>
+      Buffer.from(ListAccessBindingsRequest.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      ListAccessBindingsRequest.decode(value),
+    responseSerialize: (value: ListAccessBindingsResponse) =>
+      Buffer.from(ListAccessBindingsResponse.encode(value).finish()),
+    responseDeserialize: (value: Buffer) =>
+      ListAccessBindingsResponse.decode(value),
+  },
+  /** Sets access bindings for the specified group. */
+  setAccessBindings: {
+    path: "/yandex.cloud.organizationmanager.v1.GroupService/SetAccessBindings",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: SetAccessBindingsRequest) =>
+      Buffer.from(SetAccessBindingsRequest.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      SetAccessBindingsRequest.decode(value),
+    responseSerialize: (value: Operation) =>
+      Buffer.from(Operation.encode(value).finish()),
+    responseDeserialize: (value: Buffer) => Operation.decode(value),
+  },
+  /** Updates access bindings for the specified group. */
+  updateAccessBindings: {
+    path: "/yandex.cloud.organizationmanager.v1.GroupService/UpdateAccessBindings",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: UpdateAccessBindingsRequest) =>
+      Buffer.from(UpdateAccessBindingsRequest.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      UpdateAccessBindingsRequest.decode(value),
+    responseSerialize: (value: Operation) =>
+      Buffer.from(Operation.encode(value).finish()),
+    responseDeserialize: (value: Buffer) => Operation.decode(value),
+  },
+} as const;
+
+export interface GroupServiceServer extends UntypedServiceImplementation {
+  /**
+   * Returns the specified Group resource.
+   *
+   * To get the list of available Group resources, make a [List] request.
+   */
+  get: handleUnaryCall<GetGroupRequest, Group>;
+  /** Retrieves the list of group resources. */
+  list: handleUnaryCall<ListGroupsRequest, ListGroupsResponse>;
+  /** Creates a group in the specified organization. */
+  create: handleUnaryCall<CreateGroupRequest, Operation>;
+  /** Updates the specified group. */
+  update: handleUnaryCall<UpdateGroupRequest, Operation>;
+  /** Deletes the specified group. */
+  delete: handleUnaryCall<DeleteGroupRequest, Operation>;
+  /** Lists operations for the specified group. */
+  listOperations: handleUnaryCall<
+    ListGroupOperationsRequest,
+    ListGroupOperationsResponse
+  >;
+  /** List group active members. */
+  listMembers: handleUnaryCall<
+    ListGroupMembersRequest,
+    ListGroupMembersResponse
+  >;
+  /** Update group members. */
+  updateMembers: handleUnaryCall<UpdateGroupMembersRequest, Operation>;
+  /** Lists access bindings for the specified group. */
+  listAccessBindings: handleUnaryCall<
+    ListAccessBindingsRequest,
+    ListAccessBindingsResponse
+  >;
+  /** Sets access bindings for the specified group. */
+  setAccessBindings: handleUnaryCall<SetAccessBindingsRequest, Operation>;
+  /** Updates access bindings for the specified group. */
+  updateAccessBindings: handleUnaryCall<UpdateAccessBindingsRequest, Operation>;
+}
+
+export interface GroupServiceClient extends Client {
+  /**
+   * Returns the specified Group resource.
+   *
+   * To get the list of available Group resources, make a [List] request.
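+   *
+   * Editorial usage sketch (not part of the generated code): a unary call
+   * through this client could look as follows; the endpoint address is an
+   * assumption and the group ID is a placeholder.
+   *
+   *   const client = new GroupServiceClient(
+   *     "organization-manager.api.cloud.yandex.net:443",
+   *     ChannelCredentials.createSsl()
+   *   );
+   *   client.get(
+   *     GetGroupRequest.fromPartial({ groupId: "<group-id>" }),
+   *     (err, group) => {
+   *       if (err) throw err;
+   *       console.log(group.name);
+   *     }
+   *   );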
+ */ + get( + request: GetGroupRequest, + callback: (error: ServiceError | null, response: Group) => void + ): ClientUnaryCall; + get( + request: GetGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Group) => void + ): ClientUnaryCall; + get( + request: GetGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Group) => void + ): ClientUnaryCall; + /** Retrieves the list of group resources. */ + list( + request: ListGroupsRequest, + callback: (error: ServiceError | null, response: ListGroupsResponse) => void + ): ClientUnaryCall; + list( + request: ListGroupsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ListGroupsResponse) => void + ): ClientUnaryCall; + list( + request: ListGroupsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ListGroupsResponse) => void + ): ClientUnaryCall; + /** Creates a group in the specified organization. */ + create( + request: CreateGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified group. */ + update( + request: UpdateGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified group. */ + delete( + request: DeleteGroupRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGroupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteGroupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists operations for the specified group. */ + listOperations( + request: ListGroupOperationsRequest, + callback: ( + error: ServiceError | null, + response: ListGroupOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListGroupOperationsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGroupOperationsResponse + ) => void + ): ClientUnaryCall; + listOperations( + request: ListGroupOperationsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGroupOperationsResponse + ) => void + ): ClientUnaryCall; + /** List group active members. 
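+   *
+   * Editorial sketch (not part of the generated code): members are returned
+   * page by page; a non-empty nextPageToken in the response is passed as
+   * pageToken of the next request. Assumes a GroupServiceClient instance
+   * named `client` as in the sketch under [Get].
+   *
+   *   client.listMembers(
+   *     ListGroupMembersRequest.fromPartial({ groupId: "<group-id>", pageSize: 100 }),
+   *     (err, response) => {
+   *       if (err) throw err;
+   *       for (const member of response.members) {
+   *         console.log(member.subjectId, member.subjectType);
+   *       }
+   *     }
+   *   );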
*/ + listMembers( + request: ListGroupMembersRequest, + callback: ( + error: ServiceError | null, + response: ListGroupMembersResponse + ) => void + ): ClientUnaryCall; + listMembers( + request: ListGroupMembersRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListGroupMembersResponse + ) => void + ): ClientUnaryCall; + listMembers( + request: ListGroupMembersRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListGroupMembersResponse + ) => void + ): ClientUnaryCall; + /** Update group members. */ + updateMembers( + request: UpdateGroupMembersRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateMembers( + request: UpdateGroupMembersRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateMembers( + request: UpdateGroupMembersRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Lists access bindings for the specified group. */ + listAccessBindings( + request: ListAccessBindingsRequest, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + listAccessBindings( + request: ListAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListAccessBindingsResponse + ) => void + ): ClientUnaryCall; + /** Sets access bindings for the specified group. */ + setAccessBindings( + request: SetAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + setAccessBindings( + request: SetAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates access bindings for the specified group. 
*/ + updateAccessBindings( + request: UpdateAccessBindingsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateAccessBindings( + request: UpdateAccessBindingsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const GroupServiceClient = makeGenericClientConstructor( + GroupServiceService, + "yandex.cloud.organizationmanager.v1.GroupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): GroupServiceClient; + service: typeof GroupServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts index 0bd8bd79..00932066 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts @@ -53,7 +53,7 @@ export interface ListMembersResponse { export interface ListMembersResponse_OrganizationUser { $type: "yandex.cloud.organizationmanager.v1.ListMembersResponse.OrganizationUser"; - /** OpenID standard claims with additional Yandex Cloud Organization claims. */ + /** OpenID standard claims with additional Cloud Organization claims. */ subjectClaims?: SubjectClaims; } diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts index 8692333c..b22c0f8d 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts @@ -165,8 +165,16 @@ export interface AddDomainRequest { $type: "yandex.cloud.serverless.apigateway.v1.AddDomainRequest"; /** ID of the API gateway that the domain is attached to. */ apiGatewayId: string; - /** ID of the attaching domain. */ + /** + * ID of the attaching domain. + * + * @deprecated + */ domainId: string; + /** Name of the attaching domain. */ + domainName: string; + /** ID of certificate for the attaching domain. 
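+   *
+   * Editorial sketch (not part of the generated code): with domainId
+   * deprecated, a domain is now attached by name together with its
+   * certificate; the identifiers below are placeholders.
+   *
+   *   const request = AddDomainRequest.fromPartial({
+   *     apiGatewayId: "<api-gateway-id>",
+   *     domainName: "api.example.com",
+   *     certificateId: "<certificate-id>",
+   *   });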
*/ + certificateId: string; } export interface RemoveDomainRequest { @@ -201,6 +209,8 @@ export interface AddDomainMetadata { apiGatewayId: string; /** ID of the attaching domain. */ domainId: string; + /** Name of the attaching domain. */ + domainName: string; } export interface RemoveDomainMetadata { @@ -1187,6 +1197,8 @@ const baseAddDomainRequest: object = { $type: "yandex.cloud.serverless.apigateway.v1.AddDomainRequest", apiGatewayId: "", domainId: "", + domainName: "", + certificateId: "", }; export const AddDomainRequest = { @@ -1202,6 +1214,12 @@ export const AddDomainRequest = { if (message.domainId !== "") { writer.uint32(18).string(message.domainId); } + if (message.domainName !== "") { + writer.uint32(26).string(message.domainName); + } + if (message.certificateId !== "") { + writer.uint32(34).string(message.certificateId); + } return writer; }, @@ -1218,6 +1236,12 @@ export const AddDomainRequest = { case 2: message.domainId = reader.string(); break; + case 3: + message.domainName = reader.string(); + break; + case 4: + message.certificateId = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1236,6 +1260,14 @@ export const AddDomainRequest = { object.domainId !== undefined && object.domainId !== null ? String(object.domainId) : ""; + message.domainName = + object.domainName !== undefined && object.domainName !== null + ? String(object.domainName) + : ""; + message.certificateId = + object.certificateId !== undefined && object.certificateId !== null + ? String(object.certificateId) + : ""; return message; }, @@ -1244,6 +1276,9 @@ export const AddDomainRequest = { message.apiGatewayId !== undefined && (obj.apiGatewayId = message.apiGatewayId); message.domainId !== undefined && (obj.domainId = message.domainId); + message.domainName !== undefined && (obj.domainName = message.domainName); + message.certificateId !== undefined && + (obj.certificateId = message.certificateId); return obj; }, @@ -1253,6 +1288,8 @@ export const AddDomainRequest = { const message = { ...baseAddDomainRequest } as AddDomainRequest; message.apiGatewayId = object.apiGatewayId ?? ""; message.domainId = object.domainId ?? ""; + message.domainName = object.domainName ?? ""; + message.certificateId = object.certificateId ?? ""; return message; }, }; @@ -1567,6 +1604,7 @@ const baseAddDomainMetadata: object = { $type: "yandex.cloud.serverless.apigateway.v1.AddDomainMetadata", apiGatewayId: "", domainId: "", + domainName: "", }; export const AddDomainMetadata = { @@ -1582,6 +1620,9 @@ export const AddDomainMetadata = { if (message.domainId !== "") { writer.uint32(18).string(message.domainId); } + if (message.domainName !== "") { + writer.uint32(26).string(message.domainName); + } return writer; }, @@ -1598,6 +1639,9 @@ export const AddDomainMetadata = { case 2: message.domainId = reader.string(); break; + case 3: + message.domainName = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1616,6 +1660,10 @@ export const AddDomainMetadata = { object.domainId !== undefined && object.domainId !== null ? String(object.domainId) : ""; + message.domainName = + object.domainName !== undefined && object.domainName !== null + ? 
String(object.domainName) + : ""; return message; }, @@ -1624,6 +1672,7 @@ export const AddDomainMetadata = { message.apiGatewayId !== undefined && (obj.apiGatewayId = message.apiGatewayId); message.domainId !== undefined && (obj.domainId = message.domainId); + message.domainName !== undefined && (obj.domainName = message.domainName); return obj; }, @@ -1633,6 +1682,7 @@ export const AddDomainMetadata = { const message = { ...baseAddDomainMetadata } as AddDomainMetadata; message.apiGatewayId = object.apiGatewayId ?? ""; message.domainId = object.domainId ?? ""; + message.domainName = object.domainName ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection.ts b/src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection.ts new file mode 100644 index 00000000..61aa7d1b --- /dev/null +++ b/src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection.ts @@ -0,0 +1,284 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../../google/protobuf/timestamp"; + +export const protobufPackage = + "yandex.cloud.serverless.apigateway.websocket.v1"; + +export interface Connection { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.Connection"; + /** ID of the connection. */ + id: string; + /** ID of the API Gateway. */ + gatewayId: string; + /** The information about the caller making the request to API Gateway. */ + identity?: Identity; + /** The timestamp at which connection was established. */ + connectedAt?: Date; + /** The timestamp at which connection was last accessed. */ + lastActiveAt?: Date; +} + +export interface Identity { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.Identity"; + /** The source IP address of the caller making the request to API Gateway. */ + sourceIp: string; + /** The User Agent of the caller making the request to API Gateway. */ + userAgent: string; +} + +const baseConnection: object = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.Connection", + id: "", + gatewayId: "", +}; + +export const Connection = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.Connection" as const, + + encode( + message: Connection, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.gatewayId !== "") { + writer.uint32(18).string(message.gatewayId); + } + if (message.identity !== undefined) { + Identity.encode(message.identity, writer.uint32(26).fork()).ldelim(); + } + if (message.connectedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.connectedAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.lastActiveAt !== undefined) { + Timestamp.encode( + toTimestamp(message.lastActiveAt), + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Connection { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConnection } as Connection; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.gatewayId = reader.string(); + break; + case 3: + message.identity = Identity.decode(reader, reader.uint32()); + break; + case 4: + message.connectedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.lastActiveAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Connection { + const message = { ...baseConnection } as Connection; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : ""; + message.identity = + object.identity !== undefined && object.identity !== null + ? Identity.fromJSON(object.identity) + : undefined; + message.connectedAt = + object.connectedAt !== undefined && object.connectedAt !== null + ? fromJsonTimestamp(object.connectedAt) + : undefined; + message.lastActiveAt = + object.lastActiveAt !== undefined && object.lastActiveAt !== null + ? fromJsonTimestamp(object.lastActiveAt) + : undefined; + return message; + }, + + toJSON(message: Connection): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); + message.identity !== undefined && + (obj.identity = message.identity + ? Identity.toJSON(message.identity) + : undefined); + message.connectedAt !== undefined && + (obj.connectedAt = message.connectedAt.toISOString()); + message.lastActiveAt !== undefined && + (obj.lastActiveAt = message.lastActiveAt.toISOString()); + return obj; + }, + + fromPartial, I>>( + object: I + ): Connection { + const message = { ...baseConnection } as Connection; + message.id = object.id ?? ""; + message.gatewayId = object.gatewayId ?? ""; + message.identity = + object.identity !== undefined && object.identity !== null + ? Identity.fromPartial(object.identity) + : undefined; + message.connectedAt = object.connectedAt ?? undefined; + message.lastActiveAt = object.lastActiveAt ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Connection.$type, Connection); + +const baseIdentity: object = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.Identity", + sourceIp: "", + userAgent: "", +}; + +export const Identity = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.Identity" as const, + + encode( + message: Identity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sourceIp !== "") { + writer.uint32(10).string(message.sourceIp); + } + if (message.userAgent !== "") { + writer.uint32(18).string(message.userAgent); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Identity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseIdentity } as Identity; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sourceIp = reader.string(); + break; + case 2: + message.userAgent = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Identity { + const message = { ...baseIdentity } as Identity; + message.sourceIp = + object.sourceIp !== undefined && object.sourceIp !== null + ? String(object.sourceIp) + : ""; + message.userAgent = + object.userAgent !== undefined && object.userAgent !== null + ? String(object.userAgent) + : ""; + return message; + }, + + toJSON(message: Identity): unknown { + const obj: any = {}; + message.sourceIp !== undefined && (obj.sourceIp = message.sourceIp); + message.userAgent !== undefined && (obj.userAgent = message.userAgent); + return obj; + }, + + fromPartial, I>>(object: I): Identity { + const message = { ...baseIdentity } as Identity; + message.sourceIp = object.sourceIp ?? ""; + message.userAgent = object.userAgent ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Identity.$type, Identity); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection_service.ts b/src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection_service.ts new file mode 100644 index 00000000..21bc92dd --- /dev/null +++ b/src/generated/yandex/cloud/serverless/apigateway/websocket/v1/connection_service.ts @@ -0,0 +1,631 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Connection } from "../../../../../../yandex/cloud/serverless/apigateway/websocket/v1/connection"; + +export const protobufPackage = + "yandex.cloud.serverless.apigateway.websocket.v1"; + +export interface GetConnectionRequest { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.GetConnectionRequest"; + /** ID of the connection to get. 
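+   *
+   * Editorial sketch (not part of the generated code): the connection ID is
+   * the identifier the gateway hands to integrations for an established
+   * WebSocket connection; a lookup request is simply
+   *
+   *   const request = GetConnectionRequest.fromPartial({
+   *     connectionId: "<connection-id>",
+   *   });
+   *
+   * The matching response type is Connection (see connection.ts), whose
+   * connectedAt and lastActiveAt fields are plain JavaScript Dates.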
*/ + connectionId: string; +} + +export interface SendToConnectionRequest { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.SendToConnectionRequest"; + /** ID of the connection to which send. */ + connectionId: string; + /** Data to send. */ + data: Buffer; + /** Type of the sending data. */ + type: SendToConnectionRequest_DataType; +} + +export enum SendToConnectionRequest_DataType { + DATA_TYPE_UNSPECIFIED = 0, + /** BINARY - Binary data. */ + BINARY = 1, + /** TEXT - Text data. */ + TEXT = 2, + UNRECOGNIZED = -1, +} + +export function sendToConnectionRequest_DataTypeFromJSON( + object: any +): SendToConnectionRequest_DataType { + switch (object) { + case 0: + case "DATA_TYPE_UNSPECIFIED": + return SendToConnectionRequest_DataType.DATA_TYPE_UNSPECIFIED; + case 1: + case "BINARY": + return SendToConnectionRequest_DataType.BINARY; + case 2: + case "TEXT": + return SendToConnectionRequest_DataType.TEXT; + case -1: + case "UNRECOGNIZED": + default: + return SendToConnectionRequest_DataType.UNRECOGNIZED; + } +} + +export function sendToConnectionRequest_DataTypeToJSON( + object: SendToConnectionRequest_DataType +): string { + switch (object) { + case SendToConnectionRequest_DataType.DATA_TYPE_UNSPECIFIED: + return "DATA_TYPE_UNSPECIFIED"; + case SendToConnectionRequest_DataType.BINARY: + return "BINARY"; + case SendToConnectionRequest_DataType.TEXT: + return "TEXT"; + default: + return "UNKNOWN"; + } +} + +export interface SendToConnectionResponse { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.SendToConnectionResponse"; +} + +export interface DisconnectRequest { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.DisconnectRequest"; + /** ID of the connection to disconnect. */ + connectionId: string; +} + +export interface DisconnectResponse { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.DisconnectResponse"; +} + +const baseGetConnectionRequest: object = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.GetConnectionRequest", + connectionId: "", +}; + +export const GetConnectionRequest = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.GetConnectionRequest" as const, + + encode( + message: GetConnectionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connectionId !== "") { + writer.uint32(10).string(message.connectionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetConnectionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetConnectionRequest } as GetConnectionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connectionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetConnectionRequest { + const message = { ...baseGetConnectionRequest } as GetConnectionRequest; + message.connectionId = + object.connectionId !== undefined && object.connectionId !== null + ? 
String(object.connectionId) + : ""; + return message; + }, + + toJSON(message: GetConnectionRequest): unknown { + const obj: any = {}; + message.connectionId !== undefined && + (obj.connectionId = message.connectionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetConnectionRequest { + const message = { ...baseGetConnectionRequest } as GetConnectionRequest; + message.connectionId = object.connectionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetConnectionRequest.$type, GetConnectionRequest); + +const baseSendToConnectionRequest: object = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.SendToConnectionRequest", + connectionId: "", + type: 0, +}; + +export const SendToConnectionRequest = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.SendToConnectionRequest" as const, + + encode( + message: SendToConnectionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connectionId !== "") { + writer.uint32(10).string(message.connectionId); + } + if (message.data.length !== 0) { + writer.uint32(18).bytes(message.data); + } + if (message.type !== 0) { + writer.uint32(24).int32(message.type); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SendToConnectionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSendToConnectionRequest, + } as SendToConnectionRequest; + message.data = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connectionId = reader.string(); + break; + case 2: + message.data = reader.bytes() as Buffer; + break; + case 3: + message.type = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SendToConnectionRequest { + const message = { + ...baseSendToConnectionRequest, + } as SendToConnectionRequest; + message.connectionId = + object.connectionId !== undefined && object.connectionId !== null + ? String(object.connectionId) + : ""; + message.data = + object.data !== undefined && object.data !== null + ? Buffer.from(bytesFromBase64(object.data)) + : Buffer.alloc(0); + message.type = + object.type !== undefined && object.type !== null + ? sendToConnectionRequest_DataTypeFromJSON(object.type) + : 0; + return message; + }, + + toJSON(message: SendToConnectionRequest): unknown { + const obj: any = {}; + message.connectionId !== undefined && + (obj.connectionId = message.connectionId); + message.data !== undefined && + (obj.data = base64FromBytes( + message.data !== undefined ? message.data : Buffer.alloc(0) + )); + message.type !== undefined && + (obj.type = sendToConnectionRequest_DataTypeToJSON(message.type)); + return obj; + }, + + fromPartial, I>>( + object: I + ): SendToConnectionRequest { + const message = { + ...baseSendToConnectionRequest, + } as SendToConnectionRequest; + message.connectionId = object.connectionId ?? ""; + message.data = object.data ?? Buffer.alloc(0); + message.type = object.type ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(SendToConnectionRequest.$type, SendToConnectionRequest); + +const baseSendToConnectionResponse: object = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.SendToConnectionResponse", +}; + +export const SendToConnectionResponse = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.SendToConnectionResponse" as const, + + encode( + _: SendToConnectionResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SendToConnectionResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseSendToConnectionResponse, + } as SendToConnectionResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): SendToConnectionResponse { + const message = { + ...baseSendToConnectionResponse, + } as SendToConnectionResponse; + return message; + }, + + toJSON(_: SendToConnectionResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): SendToConnectionResponse { + const message = { + ...baseSendToConnectionResponse, + } as SendToConnectionResponse; + return message; + }, +}; + +messageTypeRegistry.set( + SendToConnectionResponse.$type, + SendToConnectionResponse +); + +const baseDisconnectRequest: object = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.DisconnectRequest", + connectionId: "", +}; + +export const DisconnectRequest = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.DisconnectRequest" as const, + + encode( + message: DisconnectRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.connectionId !== "") { + writer.uint32(10).string(message.connectionId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DisconnectRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDisconnectRequest } as DisconnectRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.connectionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DisconnectRequest { + const message = { ...baseDisconnectRequest } as DisconnectRequest; + message.connectionId = + object.connectionId !== undefined && object.connectionId !== null + ? String(object.connectionId) + : ""; + return message; + }, + + toJSON(message: DisconnectRequest): unknown { + const obj: any = {}; + message.connectionId !== undefined && + (obj.connectionId = message.connectionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DisconnectRequest { + const message = { ...baseDisconnectRequest } as DisconnectRequest; + message.connectionId = object.connectionId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DisconnectRequest.$type, DisconnectRequest); + +const baseDisconnectResponse: object = { + $type: "yandex.cloud.serverless.apigateway.websocket.v1.DisconnectResponse", +}; + +export const DisconnectResponse = { + $type: + "yandex.cloud.serverless.apigateway.websocket.v1.DisconnectResponse" as const, + + encode( + _: DisconnectResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DisconnectResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDisconnectResponse } as DisconnectResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): DisconnectResponse { + const message = { ...baseDisconnectResponse } as DisconnectResponse; + return message; + }, + + toJSON(_: DisconnectResponse): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): DisconnectResponse { + const message = { ...baseDisconnectResponse } as DisconnectResponse; + return message; + }, +}; + +messageTypeRegistry.set(DisconnectResponse.$type, DisconnectResponse); + +/** A set of methods for managing API Gateway WebSocket connections. */ +export const ConnectionServiceService = { + /** Returns the specified connection info. */ + get: { + path: "/yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetConnectionRequest) => + Buffer.from(GetConnectionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetConnectionRequest.decode(value), + responseSerialize: (value: Connection) => + Buffer.from(Connection.encode(value).finish()), + responseDeserialize: (value: Buffer) => Connection.decode(value), + }, + /** Sends data to the specified connection. */ + send: { + path: "/yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService/Send", + requestStream: false, + responseStream: false, + requestSerialize: (value: SendToConnectionRequest) => + Buffer.from(SendToConnectionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + SendToConnectionRequest.decode(value), + responseSerialize: (value: SendToConnectionResponse) => + Buffer.from(SendToConnectionResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + SendToConnectionResponse.decode(value), + }, + /** Disconnects the specified connection. */ + disconnect: { + path: "/yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService/Disconnect", + requestStream: false, + responseStream: false, + requestSerialize: (value: DisconnectRequest) => + Buffer.from(DisconnectRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DisconnectRequest.decode(value), + responseSerialize: (value: DisconnectResponse) => + Buffer.from(DisconnectResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => DisconnectResponse.decode(value), + }, +} as const; + +export interface ConnectionServiceServer extends UntypedServiceImplementation { + /** Returns the specified connection info. */ + get: handleUnaryCall; + /** Sends data to the specified connection. */ + send: handleUnaryCall; + /** Disconnects the specified connection. 
*/ + disconnect: handleUnaryCall; +} + +export interface ConnectionServiceClient extends Client { + /** Returns the specified connection info. */ + get( + request: GetConnectionRequest, + callback: (error: ServiceError | null, response: Connection) => void + ): ClientUnaryCall; + get( + request: GetConnectionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Connection) => void + ): ClientUnaryCall; + get( + request: GetConnectionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Connection) => void + ): ClientUnaryCall; + /** Sends data to the specified connection. */ + send( + request: SendToConnectionRequest, + callback: ( + error: ServiceError | null, + response: SendToConnectionResponse + ) => void + ): ClientUnaryCall; + send( + request: SendToConnectionRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: SendToConnectionResponse + ) => void + ): ClientUnaryCall; + send( + request: SendToConnectionRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: SendToConnectionResponse + ) => void + ): ClientUnaryCall; + /** Disconnects the specified connection. */ + disconnect( + request: DisconnectRequest, + callback: (error: ServiceError | null, response: DisconnectResponse) => void + ): ClientUnaryCall; + disconnect( + request: DisconnectRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: DisconnectResponse) => void + ): ClientUnaryCall; + disconnect( + request: DisconnectRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: DisconnectResponse) => void + ): ClientUnaryCall; +} + +export const ConnectionServiceClient = makeGenericClientConstructor( + ConnectionServiceService, + "yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ConnectionServiceClient; + service: typeof ConnectionServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? 
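// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// A sketch of calling the generated ConnectionServiceClient directly over @grpc/grpc-js. The
// endpoint string is a placeholder (check the SDK's endpoint list for the real address), and
// authentication metadata (an IAM token) is omitted; in practice the SDK's Session wrapper is
// the intended way to construct and authenticate these clients.
import { ChannelCredentials } from "@grpc/grpc-js";

const connectionClient = new ConnectionServiceClient(
  "apigateway-connections.example.invalid:443",               // hypothetical endpoint
  ChannelCredentials.createSsl()
);

connectionClient.send(
  SendToConnectionRequest.fromPartial({
    connectionId: "d4eofc7k2tXXXXXXXXXX",                     // hypothetical connection ID
    data: Buffer.from("ping"),
    type: SendToConnectionRequest_DataType.TEXT,
  }),
  (err, response) => {
    if (err) throw err;
    console.log("sent", response.$type);
  }
);
// ---------------------------------------------------------------------------------------------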
keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container.ts b/src/generated/yandex/cloud/serverless/containers/v1/container.ts index b1c7d6d6..95e2d564 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container.ts @@ -89,6 +89,7 @@ export interface Revision { status: Revision_Status; secrets: Secret[]; connectivity?: Connectivity; + provisionPolicy?: ProvisionPolicy; } export enum Revision_Status { @@ -168,6 +169,11 @@ export interface Resources { coreFraction: number; } +export interface ProvisionPolicy { + $type: "yandex.cloud.serverless.containers.v1.ProvisionPolicy"; + minInstances: number; +} + export interface Secret { $type: "yandex.cloud.serverless.containers.v1.Secret"; id: string; @@ -500,6 +506,12 @@ export const Revision = { writer.uint32(98).fork() ).ldelim(); } + if (message.provisionPolicy !== undefined) { + ProvisionPolicy.encode( + message.provisionPolicy, + writer.uint32(106).fork() + ).ldelim(); + } return writer; }, @@ -549,6 +561,12 @@ export const Revision = { case 12: message.connectivity = Connectivity.decode(reader, reader.uint32()); break; + case 13: + message.provisionPolicy = ProvisionPolicy.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -604,6 +622,10 @@ export const Revision = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromJSON(object.connectivity) : undefined; + message.provisionPolicy = + object.provisionPolicy !== undefined && object.provisionPolicy !== null + ? ProvisionPolicy.fromJSON(object.provisionPolicy) + : undefined; return message; }, @@ -643,6 +665,10 @@ export const Revision = { (obj.connectivity = message.connectivity ? Connectivity.toJSON(message.connectivity) : undefined); + message.provisionPolicy !== undefined && + (obj.provisionPolicy = message.provisionPolicy + ? ProvisionPolicy.toJSON(message.provisionPolicy) + : undefined); return obj; }, @@ -672,6 +698,10 @@ export const Revision = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromPartial(object.connectivity) : undefined; + message.provisionPolicy = + object.provisionPolicy !== undefined && object.provisionPolicy !== null + ? ProvisionPolicy.fromPartial(object.provisionPolicy) + : undefined; return message; }, }; @@ -1120,6 +1150,69 @@ export const Resources = { messageTypeRegistry.set(Resources.$type, Resources); +const baseProvisionPolicy: object = { + $type: "yandex.cloud.serverless.containers.v1.ProvisionPolicy", + minInstances: 0, +}; + +export const ProvisionPolicy = { + $type: "yandex.cloud.serverless.containers.v1.ProvisionPolicy" as const, + + encode( + message: ProvisionPolicy, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.minInstances !== 0) { + writer.uint32(8).int64(message.minInstances); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ProvisionPolicy { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseProvisionPolicy } as ProvisionPolicy; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minInstances = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ProvisionPolicy { + const message = { ...baseProvisionPolicy } as ProvisionPolicy; + message.minInstances = + object.minInstances !== undefined && object.minInstances !== null + ? Number(object.minInstances) + : 0; + return message; + }, + + toJSON(message: ProvisionPolicy): unknown { + const obj: any = {}; + message.minInstances !== undefined && + (obj.minInstances = Math.round(message.minInstances)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ProvisionPolicy { + const message = { ...baseProvisionPolicy } as ProvisionPolicy; + message.minInstances = object.minInstances ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ProvisionPolicy.$type, ProvisionPolicy); + const baseSecret: object = { $type: "yandex.cloud.serverless.containers.v1.Secret", id: "", diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts index f25a4fe1..93308962 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts @@ -18,6 +18,7 @@ import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Resources, Connectivity, + ProvisionPolicy, Command, Args, Container, @@ -134,6 +135,7 @@ export interface DeployContainerRevisionRequest { concurrency: number; secrets: Secret[]; connectivity?: Connectivity; + provisionPolicy?: ProvisionPolicy; } export interface ImageSpec { @@ -1532,6 +1534,12 @@ export const DeployContainerRevisionRequest = { writer.uint32(90).fork() ).ldelim(); } + if (message.provisionPolicy !== undefined) { + ProvisionPolicy.encode( + message.provisionPolicy, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -1575,6 +1583,12 @@ export const DeployContainerRevisionRequest = { case 11: message.connectivity = Connectivity.decode(reader, reader.uint32()); break; + case 12: + message.provisionPolicy = ProvisionPolicy.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1622,6 +1636,10 @@ export const DeployContainerRevisionRequest = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromJSON(object.connectivity) : undefined; + message.provisionPolicy = + object.provisionPolicy !== undefined && object.provisionPolicy !== null + ? ProvisionPolicy.fromJSON(object.provisionPolicy) + : undefined; return message; }, @@ -1658,6 +1676,10 @@ export const DeployContainerRevisionRequest = { (obj.connectivity = message.connectivity ? Connectivity.toJSON(message.connectivity) : undefined); + message.provisionPolicy !== undefined && + (obj.provisionPolicy = message.provisionPolicy + ? ProvisionPolicy.toJSON(message.provisionPolicy) + : undefined); return obj; }, @@ -1688,6 +1710,10 @@ export const DeployContainerRevisionRequest = { object.connectivity !== undefined && object.connectivity !== null ? Connectivity.fromPartial(object.connectivity) : undefined; + message.provisionPolicy = + object.provisionPolicy !== undefined && object.provisionPolicy !== null + ? 
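// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// The new provisionPolicy field lets a container revision keep a minimum number of instances
// provisioned. A minimal sketch using the generated fromPartial helper; every other revision
// parameter (image, resources, service account, etc.) is omitted here for brevity.
const deployRequest = DeployContainerRevisionRequest.fromPartial({
  provisionPolicy: { minInstances: 1 },   // keep one instance warm; encoded as int64 on the wire
  concurrency: 4,
});
console.log(DeployContainerRevisionRequest.toJSON(deployRequest));
// ---------------------------------------------------------------------------------------------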
ProvisionPolicy.fromPartial(object.provisionPolicy) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts index 8d05039f..078cb865 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts @@ -100,7 +100,7 @@ export interface ListFunctionsRequest { * 1. The field name. Currently filtering can only be applied to the [Function.name] field. * 2. An `=` operator. * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. - * Example of a filter: `name=my-function`. + * Example of a filter: `name="my-function"`. */ filter: string; } @@ -242,10 +242,10 @@ export interface ListFunctionsVersionsRequest { * A filter expression that filters resources listed in the response. * * The expression must specify: - * 1. The field name. Currently filtering can only be applied to the [Function.name] field. + * 1. The field name. Currently filtering can only be applied to the [Version.status] and [Version.runtime] fields. * 2. An `=` operator. * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. - * Example of a filter: `name=my-function`. + * Example of a filter: `status="ACTIVE"`. */ filter: string; } @@ -432,10 +432,10 @@ export interface ListFunctionTagHistoryRequest { * A filter expression that filters resources listed in the response. * * The expression must specify: - * 1. The field name. Currently filtering can only be applied to the [Function.name] field. - * 2. An `=` operator. + * 1. The field name. Currently filtering can only be applied to the [FunctionTagHistoryRecord.effective_from] and [FunctionTagHistoryRecord.effective_to] fields. + * 2. An `=` or `>` or `<` operator. * 3. The value in double quotes (`"`). Must be 3-63 characters long and match the regular expression `[a-z][-a-z0-9]{1,61}[a-z0-9]`. - * For example, `name=my-function`. + * For example, `effective_to>2021-01-01T12:00:00Z`. */ filter: string; } diff --git a/src/generated/yandex/cloud/serverless/index.ts b/src/generated/yandex/cloud/serverless/index.ts index bccb9a2c..5ad4989b 100644 --- a/src/generated/yandex/cloud/serverless/index.ts +++ b/src/generated/yandex/cloud/serverless/index.ts @@ -8,4 +8,6 @@ export * as mdbproxy_proxy from './mdbproxy/v1/proxy' export * as mdbproxy_proxy_service from './mdbproxy/v1/proxy_service' export * as triggers_predicate from './triggers/v1/predicate' export * as triggers_trigger from './triggers/v1/trigger' -export * as triggers_trigger_service from './triggers/v1/trigger_service' \ No newline at end of file +export * as triggers_trigger_service from './triggers/v1/trigger_service' +export * as apigateway_connection from './apigateway/websocket/v1/connection' +export * as apigateway_connection_service from './apigateway/websocket/v1/connection_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts b/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts index 8aa68a5a..c6ae3981 100644 --- a/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts +++ b/src/generated/yandex/cloud/serverless/triggers/v1/trigger.ts @@ -19,11 +19,12 @@ export enum TriggerType { /** * MESSAGE_QUEUE - The trigger is activated by messages from a message queue. 
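// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// The corrected filter examples in function_service.ts above, shown in use. A hypothetical
// sketch: it assumes the same generated fromPartial helpers exist for these request messages
// and that ListFunctionsRequest carries a folderId field; the folder ID is a placeholder.
const listRequest = ListFunctionsRequest.fromPartial({
  folderId: "b1gXXXXXXXXXXXXXXXXX",          // hypothetical folder ID (assumed field)
  filter: 'name="my-function"',              // the value must be double-quoted, per the fixed docs
});

const listVersionsRequest = ListFunctionsVersionsRequest.fromPartial({
  filter: 'status="ACTIVE"',                 // versions are filtered by status or runtime
});
// ---------------------------------------------------------------------------------------------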
* - * Only Yandex Message Queue is currently supported. + * Only Message Queue is currently supported. */ MESSAGE_QUEUE = 3, - /** IOT_MESSAGE - The trigger is activated by messages from Yandex IoT Core. */ + /** IOT_MESSAGE - The trigger is activated by messages from IoT Core. */ IOT_MESSAGE = 4, + IOT_BROKER_MESSAGE = 12, OBJECT_STORAGE = 5, CONTAINER_REGISTRY = 6, /** CLOUD_LOGS - The trigger is activated by cloud log group events */ @@ -34,6 +35,8 @@ export enum TriggerType { BILLING_BUDGET = 9, /** YDS - The trigger is activated by YDS events */ YDS = 10, + /** MAIL - The trigger is activated by email */ + MAIL = 11, UNRECOGNIZED = -1, } @@ -51,6 +54,9 @@ export function triggerTypeFromJSON(object: any): TriggerType { case 4: case "IOT_MESSAGE": return TriggerType.IOT_MESSAGE; + case 12: + case "IOT_BROKER_MESSAGE": + return TriggerType.IOT_BROKER_MESSAGE; case 5: case "OBJECT_STORAGE": return TriggerType.OBJECT_STORAGE; @@ -69,6 +75,9 @@ export function triggerTypeFromJSON(object: any): TriggerType { case 10: case "YDS": return TriggerType.YDS; + case 11: + case "MAIL": + return TriggerType.MAIL; case -1: case "UNRECOGNIZED": default: @@ -86,6 +95,8 @@ export function triggerTypeToJSON(object: TriggerType): string { return "MESSAGE_QUEUE"; case TriggerType.IOT_MESSAGE: return "IOT_MESSAGE"; + case TriggerType.IOT_BROKER_MESSAGE: + return "IOT_BROKER_MESSAGE"; case TriggerType.OBJECT_STORAGE: return "OBJECT_STORAGE"; case TriggerType.CONTAINER_REGISTRY: @@ -98,6 +109,8 @@ export function triggerTypeToJSON(object: TriggerType): string { return "BILLING_BUDGET"; case TriggerType.YDS: return "YDS"; + case TriggerType.MAIL: + return "MAIL"; default: return "UNKNOWN"; } @@ -277,14 +290,16 @@ export interface Trigger_Rule { timer?: Trigger_Timer | undefined; /** Rule for a message queue trigger. */ messageQueue?: Trigger_MessageQueue | undefined; - /** Rule for a Yandex IoT Core trigger. */ + /** Rule for a IoT Core trigger. */ iotMessage?: Trigger_IoTMessage | undefined; + iotBrokerMessage?: Trigger_IoTBrokerMessage | undefined; objectStorage?: Trigger_ObjectStorage | undefined; containerRegistry?: Trigger_ContainerRegistry | undefined; cloudLogs?: Trigger_CloudLogs | undefined; logging?: Trigger_Logging | undefined; billingBudget?: BillingBudget | undefined; dataStream?: DataStream | undefined; + mail?: Mail | undefined; } /** Rule for activating a timed trigger. */ @@ -303,7 +318,7 @@ export interface Trigger_Timer { /** Rule for activating a message queue trigger. */ export interface Trigger_MessageQueue { $type: "yandex.cloud.serverless.triggers.v1.Trigger.MessageQueue"; - /** ID of the message queue in Yandex Message Queue. */ + /** ID of the message queue in Message Queue. */ queueId: string; /** ID of the service account which has read access to the message queue. */ serviceAccountId: string; @@ -317,12 +332,12 @@ export interface Trigger_MessageQueue { invokeContainer?: InvokeContainerOnce | undefined; } -/** Rule for activating a Yandex IoT Core trigger. */ +/** Rule for activating a IoT Core trigger. */ export interface Trigger_IoTMessage { $type: "yandex.cloud.serverless.triggers.v1.Trigger.IoTMessage"; - /** ID of the Yandex IoT Core registry. */ + /** ID of the IoT Core registry. */ registryId: string; - /** ID of the Yandex IoT Core device in the registry. */ + /** ID of the IoT Core device in the registry. */ deviceId: string; /** MQTT topic whose messages activate the trigger. 
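// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// The TriggerType enum gains IOT_BROKER_MESSAGE (12) and MAIL (11) in this hunk. The generated
// JSON converters round-trip both names and numbers, and fall back to UNRECOGNIZED for
// anything unknown:
console.log(triggerTypeToJSON(TriggerType.MAIL));             // "MAIL"
console.log(triggerTypeFromJSON("IOT_BROKER_MESSAGE"));       // 12 (TriggerType.IOT_BROKER_MESSAGE)
console.log(triggerTypeFromJSON(12));                         // 12 as well — numeric input is accepted
console.log(triggerTypeFromJSON("NOT_A_TRIGGER"));            // -1 (TriggerType.UNRECOGNIZED)
// ---------------------------------------------------------------------------------------------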
*/ mqttTopic: string; @@ -332,6 +347,19 @@ export interface Trigger_IoTMessage { invokeContainer?: InvokeContainerWithRetry | undefined; } +/** Rule for activating a IoT Core Broker trigger. */ +export interface Trigger_IoTBrokerMessage { + $type: "yandex.cloud.serverless.triggers.v1.Trigger.IoTBrokerMessage"; + /** ID of the IoT Core broker. */ + brokerId: string; + /** MQTT topic whose messages activate the trigger. */ + mqttTopic: string; + /** Instructions for invoking a function with retries as needed. */ + invokeFunction?: InvokeFunctionWithRetry | undefined; + /** Instructions for invoking a container with retries as needed. */ + invokeContainer?: InvokeContainerWithRetry | undefined; +} + export interface Trigger_ObjectStorage { $type: "yandex.cloud.serverless.triggers.v1.Trigger.ObjectStorage"; /** Type (name) of events, at least one value is required. */ @@ -541,6 +569,17 @@ export interface DataStream { invokeContainer?: InvokeContainerWithRetry | undefined; } +export interface Mail { + $type: "yandex.cloud.serverless.triggers.v1.Mail"; + /** + * Address to receive emails for trigger activation. + * Field is ignored for write requests and populated on trigger creation. + */ + email: string; + invokeFunction?: InvokeFunctionWithRetry | undefined; + invokeContainer?: InvokeContainerWithRetry | undefined; +} + const baseTrigger: object = { $type: "yandex.cloud.serverless.triggers.v1.Trigger", id: "", @@ -823,6 +862,12 @@ export const Trigger_Rule = { writer.uint32(34).fork() ).ldelim(); } + if (message.iotBrokerMessage !== undefined) { + Trigger_IoTBrokerMessage.encode( + message.iotBrokerMessage, + writer.uint32(114).fork() + ).ldelim(); + } if (message.objectStorage !== undefined) { Trigger_ObjectStorage.encode( message.objectStorage, @@ -856,6 +901,9 @@ export const Trigger_Rule = { if (message.dataStream !== undefined) { DataStream.encode(message.dataStream, writer.uint32(98).fork()).ldelim(); } + if (message.mail !== undefined) { + Mail.encode(message.mail, writer.uint32(106).fork()).ldelim(); + } return writer; }, @@ -881,6 +929,12 @@ export const Trigger_Rule = { reader.uint32() ); break; + case 14: + message.iotBrokerMessage = Trigger_IoTBrokerMessage.decode( + reader, + reader.uint32() + ); + break; case 5: message.objectStorage = Trigger_ObjectStorage.decode( reader, @@ -905,6 +959,9 @@ export const Trigger_Rule = { case 12: message.dataStream = DataStream.decode(reader, reader.uint32()); break; + case 13: + message.mail = Mail.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -927,6 +984,10 @@ export const Trigger_Rule = { object.iotMessage !== undefined && object.iotMessage !== null ? Trigger_IoTMessage.fromJSON(object.iotMessage) : undefined; + message.iotBrokerMessage = + object.iotBrokerMessage !== undefined && object.iotBrokerMessage !== null + ? Trigger_IoTBrokerMessage.fromJSON(object.iotBrokerMessage) + : undefined; message.objectStorage = object.objectStorage !== undefined && object.objectStorage !== null ? Trigger_ObjectStorage.fromJSON(object.objectStorage) @@ -952,6 +1013,10 @@ export const Trigger_Rule = { object.dataStream !== undefined && object.dataStream !== null ? DataStream.fromJSON(object.dataStream) : undefined; + message.mail = + object.mail !== undefined && object.mail !== null + ? Mail.fromJSON(object.mail) + : undefined; return message; }, @@ -969,6 +1034,10 @@ export const Trigger_Rule = { (obj.iotMessage = message.iotMessage ? 
Trigger_IoTMessage.toJSON(message.iotMessage) : undefined); + message.iotBrokerMessage !== undefined && + (obj.iotBrokerMessage = message.iotBrokerMessage + ? Trigger_IoTBrokerMessage.toJSON(message.iotBrokerMessage) + : undefined); message.objectStorage !== undefined && (obj.objectStorage = message.objectStorage ? Trigger_ObjectStorage.toJSON(message.objectStorage) @@ -993,6 +1062,8 @@ export const Trigger_Rule = { (obj.dataStream = message.dataStream ? DataStream.toJSON(message.dataStream) : undefined); + message.mail !== undefined && + (obj.mail = message.mail ? Mail.toJSON(message.mail) : undefined); return obj; }, @@ -1012,6 +1083,10 @@ export const Trigger_Rule = { object.iotMessage !== undefined && object.iotMessage !== null ? Trigger_IoTMessage.fromPartial(object.iotMessage) : undefined; + message.iotBrokerMessage = + object.iotBrokerMessage !== undefined && object.iotBrokerMessage !== null + ? Trigger_IoTBrokerMessage.fromPartial(object.iotBrokerMessage) + : undefined; message.objectStorage = object.objectStorage !== undefined && object.objectStorage !== null ? Trigger_ObjectStorage.fromPartial(object.objectStorage) @@ -1037,6 +1112,10 @@ export const Trigger_Rule = { object.dataStream !== undefined && object.dataStream !== null ? DataStream.fromPartial(object.dataStream) : undefined; + message.mail = + object.mail !== undefined && object.mail !== null + ? Mail.fromPartial(object.mail) + : undefined; return message; }, }; @@ -1489,6 +1568,142 @@ export const Trigger_IoTMessage = { messageTypeRegistry.set(Trigger_IoTMessage.$type, Trigger_IoTMessage); +const baseTrigger_IoTBrokerMessage: object = { + $type: "yandex.cloud.serverless.triggers.v1.Trigger.IoTBrokerMessage", + brokerId: "", + mqttTopic: "", +}; + +export const Trigger_IoTBrokerMessage = { + $type: + "yandex.cloud.serverless.triggers.v1.Trigger.IoTBrokerMessage" as const, + + encode( + message: Trigger_IoTBrokerMessage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.brokerId !== "") { + writer.uint32(10).string(message.brokerId); + } + if (message.mqttTopic !== "") { + writer.uint32(18).string(message.mqttTopic); + } + if (message.invokeFunction !== undefined) { + InvokeFunctionWithRetry.encode( + message.invokeFunction, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.invokeContainer !== undefined) { + InvokeContainerWithRetry.encode( + message.invokeContainer, + writer.uint32(818).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Trigger_IoTBrokerMessage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseTrigger_IoTBrokerMessage, + } as Trigger_IoTBrokerMessage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.brokerId = reader.string(); + break; + case 2: + message.mqttTopic = reader.string(); + break; + case 101: + message.invokeFunction = InvokeFunctionWithRetry.decode( + reader, + reader.uint32() + ); + break; + case 102: + message.invokeContainer = InvokeContainerWithRetry.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Trigger_IoTBrokerMessage { + const message = { + ...baseTrigger_IoTBrokerMessage, + } as Trigger_IoTBrokerMessage; + message.brokerId = + object.brokerId !== undefined && object.brokerId !== null + ? 
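// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// A sketch of the two new Trigger_Rule variants added in this file, built with fromPartial.
// The broker ID and MQTT topic are hypothetical placeholders; the invokeFunction /
// invokeContainer targets are left unset because their messages are defined elsewhere in the
// file.
const brokerRule = Trigger_Rule.fromPartial({
  iotBrokerMessage: {
    brokerId: "areXXXXXXXXXXXXXXXX",          // hypothetical IoT Core broker ID
    mqttTopic: "devices/+/events",
  },
});

const mailRule = Trigger_Rule.fromPartial({
  mail: {},   // the email address is server-assigned: "populated on trigger creation"
});
console.log(Trigger_Rule.toJSON(brokerRule), Trigger_Rule.toJSON(mailRule));
// ---------------------------------------------------------------------------------------------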
String(object.brokerId) + : ""; + message.mqttTopic = + object.mqttTopic !== undefined && object.mqttTopic !== null + ? String(object.mqttTopic) + : ""; + message.invokeFunction = + object.invokeFunction !== undefined && object.invokeFunction !== null + ? InvokeFunctionWithRetry.fromJSON(object.invokeFunction) + : undefined; + message.invokeContainer = + object.invokeContainer !== undefined && object.invokeContainer !== null + ? InvokeContainerWithRetry.fromJSON(object.invokeContainer) + : undefined; + return message; + }, + + toJSON(message: Trigger_IoTBrokerMessage): unknown { + const obj: any = {}; + message.brokerId !== undefined && (obj.brokerId = message.brokerId); + message.mqttTopic !== undefined && (obj.mqttTopic = message.mqttTopic); + message.invokeFunction !== undefined && + (obj.invokeFunction = message.invokeFunction + ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) + : undefined); + message.invokeContainer !== undefined && + (obj.invokeContainer = message.invokeContainer + ? InvokeContainerWithRetry.toJSON(message.invokeContainer) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Trigger_IoTBrokerMessage { + const message = { + ...baseTrigger_IoTBrokerMessage, + } as Trigger_IoTBrokerMessage; + message.brokerId = object.brokerId ?? ""; + message.mqttTopic = object.mqttTopic ?? ""; + message.invokeFunction = + object.invokeFunction !== undefined && object.invokeFunction !== null + ? InvokeFunctionWithRetry.fromPartial(object.invokeFunction) + : undefined; + message.invokeContainer = + object.invokeContainer !== undefined && object.invokeContainer !== null + ? InvokeContainerWithRetry.fromPartial(object.invokeContainer) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Trigger_IoTBrokerMessage.$type, + Trigger_IoTBrokerMessage +); + const baseTrigger_ObjectStorage: object = { $type: "yandex.cloud.serverless.triggers.v1.Trigger.ObjectStorage", eventType: 0, @@ -3421,6 +3636,111 @@ export const DataStream = { messageTypeRegistry.set(DataStream.$type, DataStream); +const baseMail: object = { + $type: "yandex.cloud.serverless.triggers.v1.Mail", + email: "", +}; + +export const Mail = { + $type: "yandex.cloud.serverless.triggers.v1.Mail" as const, + + encode(message: Mail, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.email !== "") { + writer.uint32(18).string(message.email); + } + if (message.invokeFunction !== undefined) { + InvokeFunctionWithRetry.encode( + message.invokeFunction, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.invokeContainer !== undefined) { + InvokeContainerWithRetry.encode( + message.invokeContainer, + writer.uint32(826).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mail { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMail } as Mail; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.email = reader.string(); + break; + case 101: + message.invokeFunction = InvokeFunctionWithRetry.decode( + reader, + reader.uint32() + ); + break; + case 103: + message.invokeContainer = InvokeContainerWithRetry.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mail { + const message = { ...baseMail } as Mail; + message.email = + object.email !== undefined && object.email !== null + ? String(object.email) + : ""; + message.invokeFunction = + object.invokeFunction !== undefined && object.invokeFunction !== null + ? InvokeFunctionWithRetry.fromJSON(object.invokeFunction) + : undefined; + message.invokeContainer = + object.invokeContainer !== undefined && object.invokeContainer !== null + ? InvokeContainerWithRetry.fromJSON(object.invokeContainer) + : undefined; + return message; + }, + + toJSON(message: Mail): unknown { + const obj: any = {}; + message.email !== undefined && (obj.email = message.email); + message.invokeFunction !== undefined && + (obj.invokeFunction = message.invokeFunction + ? InvokeFunctionWithRetry.toJSON(message.invokeFunction) + : undefined); + message.invokeContainer !== undefined && + (obj.invokeContainer = message.invokeContainer + ? InvokeContainerWithRetry.toJSON(message.invokeContainer) + : undefined); + return obj; + }, + + fromPartial, I>>(object: I): Mail { + const message = { ...baseMail } as Mail; + message.email = object.email ?? ""; + message.invokeFunction = + object.invokeFunction !== undefined && object.invokeFunction !== null + ? InvokeFunctionWithRetry.fromPartial(object.invokeFunction) + : undefined; + message.invokeContainer = + object.invokeContainer !== undefined && object.invokeContainer !== null + ? InvokeContainerWithRetry.fromPartial(object.invokeContainer) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mail.$type, Mail); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/service_clients.ts b/src/generated/yandex/cloud/service_clients.ts index 6f89b569..c007e866 100644 --- a/src/generated/yandex/cloud/service_clients.ts +++ b/src/generated/yandex/cloud/service_clients.ts @@ -31,6 +31,7 @@ export const HostTypeServiceClient = cloudApi.compute.host_type_service.HostType export const ComputeImageServiceClient = cloudApi.compute.image_service.ImageServiceClient; export const InstanceServiceClient = cloudApi.compute.instance_service.InstanceServiceClient; export const PlacementGroupServiceClient = cloudApi.compute.placement_group_service.PlacementGroupServiceClient; +export const SnapshotScheduleServiceClient = cloudApi.compute.snapshot_schedule_service.SnapshotScheduleServiceClient; export const SnapshotServiceClient = cloudApi.compute.snapshot_service.SnapshotServiceClient; export const ZoneServiceClient = cloudApi.compute.zone_service.ZoneServiceClient; export const InstanceGroupServiceClient = cloudApi.compute.instance_group_service.InstanceGroupServiceClient; @@ -62,6 +63,8 @@ export const ServiceAccountServiceClient = cloudApi.iam.service_account_service. 
export const UserAccountServiceClient = cloudApi.iam.user_account_service.UserAccountServiceClient; export const YandexPassportUserAccountServiceClient = cloudApi.iam.yandex_passport_user_account_service.YandexPassportUserAccountServiceClient; export const AccessKeyServiceClient = cloudApi.iam.access_key_service.AccessKeyServiceClient; +export const BrokerDataServiceClient = cloudApi.iot.broker_broker_data_service.BrokerDataServiceClient; +export const BrokerServiceClient = cloudApi.iot.broker_service.BrokerServiceClient; export const DeviceDataServiceClient = cloudApi.iot.devices_device_data_service.DeviceDataServiceClient; export const DeviceServiceClient = cloudApi.iot.devices_device_service.DeviceServiceClient; export const RegistryDataServiceClient = cloudApi.iot.devices_registry_data_service.RegistryDataServiceClient; @@ -124,7 +127,9 @@ export const SqlServerClusterServiceClient = cloudApi.mdb.sqlserver_cluster_serv export const SqlServerDatabaseServiceClient = cloudApi.mdb.sqlserver_database_service.DatabaseServiceClient; export const SqlServerResourcePresetServiceClient = cloudApi.mdb.sqlserver_resource_preset_service.ResourcePresetServiceClient; export const SqlServerUserServiceClient = cloudApi.mdb.sqlserver_user_service.UserServiceClient; +export const DashboardServiceClient = cloudApi.monitoring.dashboard_service.DashboardServiceClient; export const OperationServiceClient = cloudApi.operation.operation_service.OperationServiceClient; +export const GroupServiceClient = cloudApi.organizationmanager.group_service.GroupServiceClient; export const OrganizationServiceClient = cloudApi.organizationmanager.organization_service.OrganizationServiceClient; export const UserServiceClient = cloudApi.organizationmanager.user_service.UserServiceClient; export const OmCertificateServiceClient = cloudApi.organizationmanager.certificate_service.CertificateServiceClient; @@ -136,6 +141,7 @@ export const ContainerServiceClient = cloudApi.serverless.containers_container_s export const FunctionServiceClient = cloudApi.serverless.functions_function_service.FunctionServiceClient; export const ProxyServiceClient = cloudApi.serverless.mdbproxy_proxy_service.ProxyServiceClient; export const TriggerServiceClient = cloudApi.serverless.triggers_trigger_service.TriggerServiceClient; +export const WebSocketConnectionServiceClient = cloudApi.serverless.apigateway_connection_service.ConnectionServiceClient; export const BucketServiceClient = cloudApi.storage.bucket_service.BucketServiceClient; export const AddressServiceClient = cloudApi.vpc.address_service.AddressServiceClient; export const GatewayServiceClient = cloudApi.vpc.gateway_service.GatewayServiceClient; diff --git a/src/generated/yandex/cloud/storage/v1/bucket.ts b/src/generated/yandex/cloud/storage/v1/bucket.ts index 7c274cdd..7116ec3c 100644 --- a/src/generated/yandex/cloud/storage/v1/bucket.ts +++ b/src/generated/yandex/cloud/storage/v1/bucket.ts @@ -89,7 +89,7 @@ export interface Bucket { /** * Name of the bucket. * - * The name is unique within Yandex Cloud. For naming limitations and rules, see + * The name is unique within the platform. For naming limitations and rules, see * [documentation](/docs/storage/concepts/bucket#naming). */ name: string; @@ -101,8 +101,8 @@ export interface Bucket { */ anonymousAccessFlags?: AnonymousAccessFlags; /** - * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`) and - * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). 
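// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// The service_clients.ts hunk above re-exports the new clients (SnapshotScheduleServiceClient,
// BrokerServiceClient, BrokerDataServiceClient, DashboardServiceClient, GroupServiceClient,
// WebSocketConnectionServiceClient). A hedged sketch of wiring one of them up through the
// SDK's Session wrapper; the exact Session / serviceClients surface is assumed from earlier
// commits in this series rather than shown in this hunk.
import { Session, serviceClients } from "@yandex-cloud/nodejs-sdk";

const session = new Session({ iamToken: process.env.YC_IAM_TOKEN ?? "" });   // assumed config shape
const wsConnections = session.client(serviceClients.WebSocketConnectionServiceClient);
// wsConnections.get / .send / .disconnect accept the request messages generated above.
// ---------------------------------------------------------------------------------------------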
+ * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`), cold storage + * (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms), and ice storage (`ICE` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). */ defaultStorageClass: string; @@ -158,7 +158,7 @@ export interface ACL_Grant { permission: ACL_Grant_Permission; /** The grantee type for the grant. */ grantType: ACL_Grant_GrantType; - /** ID of the Yandex Cloud user who is a grantee. Required when the [grant_type] is `GRANT_TYPE_ACCOUNT`. */ + /** ID of the account who is a grantee. Required when the [grant_type] is `GRANT_TYPE_ACCOUNT`. */ granteeId: string; } @@ -257,7 +257,7 @@ export function aCL_Grant_PermissionToJSON( export enum ACL_Grant_GrantType { GRANT_TYPE_UNSPECIFIED = 0, /** - * GRANT_TYPE_ACCOUNT - A grantee is a [Yandex Cloud account](/docs/iam/concepts/#accounts). + * GRANT_TYPE_ACCOUNT - A grantee is an [account on the platform](/docs/iam/concepts/#accounts). * * For this grantee type, you need to specify the user ID in [Bucket.acl.grants.grantee_id] field. To get user ID, see * [instruction](/docs/iam/operations/users/get). @@ -267,8 +267,8 @@ export enum ACL_Grant_GrantType { */ GRANT_TYPE_ACCOUNT = 1, /** - * GRANT_TYPE_ALL_AUTHENTICATED_USERS - Grantees are all authenticated Yandex Cloud users, both from your clouds and other users' clouds. Access - * permission to this group allows any Yandex Cloud account to access the resource via a signed (authenticated) + * GRANT_TYPE_ALL_AUTHENTICATED_USERS - Grantees are all authenticated users, both from your clouds and other users' clouds. Access + * permission to this group allows any account on the platform to access the resource via a signed (authenticated) * request. * * Maps to using `uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"` value for `x-amz-grant-*` @@ -664,10 +664,10 @@ export interface LifecycleRule_NoncurrentTransition { */ noncurrentDays?: number; /** - * Storage class to which a non-current version of an object is transitioned. + * Storage class to which a non-current version of an object is transitioned from standard storage. * * The only supported class is cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). Transitions from cold - * to standard storage are not allowed. + * to standard storage and transitions to or from ice storage are not allowed. */ storageClass: string; } @@ -702,10 +702,10 @@ export interface LifecycleRule_Transition { */ days?: number; /** - * Storage class to which an object is transitioned. + * Storage class to which an object is transitioned from standard storage. * * The only supported class is cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). Transitions from cold - * to standard storage are not allowed. + * to standard storage and transitions to or from ice storage are not allowed. */ storageClass: string; } @@ -764,8 +764,8 @@ export interface Counters { export interface OptionalSizeByClass { $type: "yandex.cloud.storage.v1.OptionalSizeByClass"; /** - * Storage class. Supported classes are standard storage (`STANDARD`) and cold storage (`COLD`, `STANDARD_IA`, - * `NEARLINE` all synonyms). + * Storage class. Supported classes are standard storage (`STANDARD`), cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` + * all synonyms), and ice storage (`ICE` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). 
*/ storageClass: string; @@ -777,8 +777,8 @@ export interface OptionalSizeByClass { export interface SizeByClass { $type: "yandex.cloud.storage.v1.SizeByClass"; /** - * Storage class. Supported classes are standard storage (`STANDARD`) and cold storage (`COLD`, `STANDARD_IA`, - * `NEARLINE` all synonyms). + * Storage class. Supported classes are standard storage (`STANDARD`), cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` + * all synonyms), and ice storage (`ICE` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). */ storageClass: string; @@ -790,8 +790,8 @@ export interface SizeByClass { export interface CountersByClass { $type: "yandex.cloud.storage.v1.CountersByClass"; /** - * Storage class. Supported classes are standard storage (`STANDARD`) and cold storage (`COLD`, `STANDARD_IA`, - * `NEARLINE` all synonyms). + * Storage class. Supported classes are standard storage (`STANDARD`), cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` + * all synonyms), and ice storage (`ice` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). */ storageClass: string; @@ -815,8 +815,8 @@ export interface BucketStats { /** Object-related statistics by storage class and type of upload (simple vs. multipart), in bytes. */ storageClassCounters: CountersByClass[]; /** - * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`) and - * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`), cold storage + * (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms), and ice storage (`ICE` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). */ defaultStorageClass?: string; @@ -849,7 +849,7 @@ export interface HTTPSConfig { /** End of the TLS certificate validity period (Not After field) */ notAfter?: Date; /** - * ID of the TLS certificate in Yandex Certificate Manager. + * ID of the TLS certificate in Certificate Manager. * * To get information about the certificate from Certificate Manager, make a * [yandex.cloud.certificatemanager.v1.CertificateService.Get] request. @@ -862,7 +862,7 @@ export enum HTTPSConfig_SourceType { SOURCE_TYPE_UNSPECIFIED = 0, /** SOURCE_TYPE_SELF_MANAGED - Your certificate, uploaded directly. */ SOURCE_TYPE_SELF_MANAGED = 1, - /** SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER - Certificate managed by Yandex Certificate Manager. */ + /** SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER - Certificate managed by Certificate Manager. */ SOURCE_TYPE_MANAGED_BY_CERTIFICATE_MANAGER = 2, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/storage/v1/bucket_service.ts b/src/generated/yandex/cloud/storage/v1/bucket_service.ts index 5d1be9c6..cdf8d935 100644 --- a/src/generated/yandex/cloud/storage/v1/bucket_service.ts +++ b/src/generated/yandex/cloud/storage/v1/bucket_service.ts @@ -131,7 +131,7 @@ export interface CreateBucketRequest { /** * Name of the bucket. * - * The name must be unique within Yandex Cloud. For naming limitations and rules, see + * The name must be unique within the platform. For naming limitations and rules, see * [documentation](/docs/storage/concepts/bucket#naming). */ name: string; @@ -142,8 +142,8 @@ export interface CreateBucketRequest { */ folderId: string; /** - * Default storage class for objects in the bucket. 
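// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// The docs in this hunk add the ice storage class (`ICE` / `GLACIER`) alongside standard and
// cold storage. A hypothetical bucket-creation sketch; it assumes the usual generated
// fromPartial helper for CreateBucketRequest and uses placeholder IDs.
const createBucketRequest = CreateBucketRequest.fromPartial({
  name: "my-unique-bucket-name",             // must be unique across the platform
  folderId: "b1gXXXXXXXXXXXXXXXXX",          // hypothetical folder ID
  defaultStorageClass: "ICE",                // "STANDARD" and "COLD" (or their synonyms) also work
});
// ---------------------------------------------------------------------------------------------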
Supported classes are standard storage (`STANDARD`) and - * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`), cold storage + * (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms), and ice storage (`ICE` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). */ defaultStorageClass: string; @@ -188,8 +188,8 @@ export interface UpdateBucketRequest { */ anonymousAccessFlags?: AnonymousAccessFlags; /** - * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`) and - * cold storage (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms). + * Default storage class for objects in the bucket. Supported classes are standard storage (`STANDARD`), cold storage + * (`COLD`, `STANDARD_IA`, `NEARLINE` all synonyms), and ice storage (`ICE` and `GLACIER` are synonyms). * For details, see [documentation](/docs/storage/concepts/storage-class). */ defaultStorageClass: string; @@ -273,7 +273,7 @@ export interface SelfManagedHTTPSConfigParams { privateKeyPem: string; } -/** A resource for a TLS certificate from Yandex Certificate Manager. */ +/** A resource for a TLS certificate from Certificate Manager. */ export interface CertificateManagerHTTPSConfigParams { $type: "yandex.cloud.storage.v1.CertificateManagerHTTPSConfigParams"; /** @@ -296,7 +296,7 @@ export interface SetBucketHTTPSConfigRequest { */ selfManaged?: SelfManagedHTTPSConfigParams | undefined; /** - * TLS certificate from Yandex Certificate Manager. + * TLS certificate from Certificate Manager. * * To create a certificate in Certificate Manager, make a * [yandex.cloud.certificatemanager.v1.CertificateService.Create] request. diff --git a/src/generated/yandex/cloud/vpc/v1/route_table.ts b/src/generated/yandex/cloud/vpc/v1/route_table.ts index e9345a7d..16f590bf 100644 --- a/src/generated/yandex/cloud/vpc/v1/route_table.ts +++ b/src/generated/yandex/cloud/vpc/v1/route_table.ts @@ -40,6 +40,8 @@ export interface StaticRoute { destinationPrefix: string | undefined; /** Next hop IP address */ nextHopAddress: string | undefined; + /** Next hop gateway id */ + gatewayId: string | undefined; /** Resource labels as `` key:value `` pairs. Maximum of 64 per resource. */ labels: { [key: string]: string }; } @@ -330,6 +332,9 @@ export const StaticRoute = { if (message.nextHopAddress !== undefined) { writer.uint32(18).string(message.nextHopAddress); } + if (message.gatewayId !== undefined) { + writer.uint32(34).string(message.gatewayId); + } Object.entries(message.labels).forEach(([key, value]) => { StaticRoute_LabelsEntry.encode( { @@ -357,6 +362,9 @@ export const StaticRoute = { case 2: message.nextHopAddress = reader.string(); break; + case 4: + message.gatewayId = reader.string(); + break; case 3: const entry3 = StaticRoute_LabelsEntry.decode( reader, @@ -385,6 +393,10 @@ export const StaticRoute = { object.nextHopAddress !== undefined && object.nextHopAddress !== null ? String(object.nextHopAddress) : undefined; + message.gatewayId = + object.gatewayId !== undefined && object.gatewayId !== null + ? String(object.gatewayId) + : undefined; message.labels = Object.entries(object.labels ?? 
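// --- Illustrative aside (editor's note, not part of the generated patch) --------------------
// route_table.ts gains a gatewayId next hop on StaticRoute (field 4 on the wire), presumably
// as an alternative to nextHopAddress. A minimal sketch with the generated helper; the gateway
// ID is a hypothetical placeholder.
const defaultRoute = StaticRoute.fromPartial({
  destinationPrefix: "0.0.0.0/0",
  gatewayId: "enpXXXXXXXXXXXXXXXX",           // hypothetical VPC gateway ID
  labels: { purpose: "egress" },
});
console.log(StaticRoute.toJSON(defaultRoute));   // gatewayId appears in the JSON form as well
// ---------------------------------------------------------------------------------------------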
{}).reduce<{ [key: string]: string; }>((acc, [key, value]) => { @@ -400,6 +412,7 @@ export const StaticRoute = { (obj.destinationPrefix = message.destinationPrefix); message.nextHopAddress !== undefined && (obj.nextHopAddress = message.nextHopAddress); + message.gatewayId !== undefined && (obj.gatewayId = message.gatewayId); obj.labels = {}; if (message.labels) { Object.entries(message.labels).forEach(([k, v]) => { @@ -415,6 +428,7 @@ export const StaticRoute = { const message = { ...baseStaticRoute } as StaticRoute; message.destinationPrefix = object.destinationPrefix ?? undefined; message.nextHopAddress = object.nextHopAddress ?? undefined; + message.gatewayId = object.gatewayId ?? undefined; message.labels = Object.entries(object.labels ?? {}).reduce<{ [key: string]: string; }>((acc, [key, value]) => { diff --git a/src/generated/yandex/cloud/ydb/v1/database.ts b/src/generated/yandex/cloud/ydb/v1/database.ts index a486e7a6..bfc10595 100644 --- a/src/generated/yandex/cloud/ydb/v1/database.ts +++ b/src/generated/yandex/cloud/ydb/v1/database.ts @@ -95,6 +95,7 @@ export interface Database { documentApiEndpoint: string; kinesisApiEndpoint: string; monitoringConfig?: MonitoringConfig; + deletionProtection: boolean; } export enum Database_Status { @@ -280,6 +281,8 @@ export interface ServerlessDatabase { * You will be charged for the on-demand consumption only if provisioned capacity is consumed. */ provisionedRcuLimit: number; + /** write quota for topic service, defined in bytes per second. */ + topicWriteQuota: number; } export interface ZonalDatabase { @@ -330,6 +333,7 @@ const baseDatabase: object = { locationId: "", documentApiEndpoint: "", kinesisApiEndpoint: "", + deletionProtection: false, }; export const Database = { @@ -442,6 +446,9 @@ export const Database = { writer.uint32(194).fork() ).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(200).bool(message.deletionProtection); + } return writer; }, @@ -540,6 +547,9 @@ export const Database = { reader.uint32() ); break; + case 25: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -643,6 +653,11 @@ export const Database = { object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromJSON(object.monitoringConfig) : undefined; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -711,6 +726,8 @@ export const Database = { (obj.monitoringConfig = message.monitoringConfig ? MonitoringConfig.toJSON(message.monitoringConfig) : undefined); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -772,6 +789,7 @@ export const Database = { object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromPartial(object.monitoringConfig) : undefined; + message.deletionProtection = object.deletionProtection ?? 
false; return message; }, }; @@ -1986,6 +2004,7 @@ const baseServerlessDatabase: object = { storageSizeLimit: 0, enableThrottlingRcuLimit: false, provisionedRcuLimit: 0, + topicWriteQuota: 0, }; export const ServerlessDatabase = { @@ -2007,6 +2026,9 @@ export const ServerlessDatabase = { if (message.provisionedRcuLimit !== 0) { writer.uint32(32).int64(message.provisionedRcuLimit); } + if (message.topicWriteQuota !== 0) { + writer.uint32(40).int64(message.topicWriteQuota); + } return writer; }, @@ -2029,6 +2051,9 @@ export const ServerlessDatabase = { case 4: message.provisionedRcuLimit = longToNumber(reader.int64() as Long); break; + case 5: + message.topicWriteQuota = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -2058,6 +2083,10 @@ export const ServerlessDatabase = { object.provisionedRcuLimit !== null ? Number(object.provisionedRcuLimit) : 0; + message.topicWriteQuota = + object.topicWriteQuota !== undefined && object.topicWriteQuota !== null + ? Number(object.topicWriteQuota) + : 0; return message; }, @@ -2071,6 +2100,8 @@ export const ServerlessDatabase = { (obj.enableThrottlingRcuLimit = message.enableThrottlingRcuLimit); message.provisionedRcuLimit !== undefined && (obj.provisionedRcuLimit = Math.round(message.provisionedRcuLimit)); + message.topicWriteQuota !== undefined && + (obj.topicWriteQuota = Math.round(message.topicWriteQuota)); return obj; }, @@ -2082,6 +2113,7 @@ export const ServerlessDatabase = { message.storageSizeLimit = object.storageSizeLimit ?? 0; message.enableThrottlingRcuLimit = object.enableThrottlingRcuLimit ?? false; message.provisionedRcuLimit = object.provisionedRcuLimit ?? 0; + message.topicWriteQuota = object.topicWriteQuota ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/ydb/v1/database_service.ts b/src/generated/yandex/cloud/ydb/v1/database_service.ts index 496204a3..5906d075 100644 --- a/src/generated/yandex/cloud/ydb/v1/database_service.ts +++ b/src/generated/yandex/cloud/ydb/v1/database_service.ts @@ -39,6 +39,20 @@ import { export const protobufPackage = "yandex.cloud.ydb.v1"; +export interface MoveDatabaseRequest { + $type: "yandex.cloud.ydb.v1.MoveDatabaseRequest"; + /** ID of the YDB instance to move. */ + databaseId: string; + /** ID of the destination folder. */ + destinationFolderId: string; +} + +export interface MoveDatabaseMetadata { + $type: "yandex.cloud.ydb.v1.MoveDatabaseMetadata"; + databaseId: string; + databaseName: string; +} + export interface RestoreBackupRequest { $type: "yandex.cloud.ydb.v1.RestoreBackupRequest"; /** Required. ID of the YDB backup. 
*/ @@ -152,6 +166,7 @@ export interface CreateDatabaseRequest { labels: { [key: string]: string }; backupConfig?: BackupConfig; monitoringConfig?: MonitoringConfig; + deletionProtection: boolean; } export interface CreateDatabaseRequest_LabelsEntry { @@ -189,6 +204,7 @@ export interface UpdateDatabaseRequest { labels: { [key: string]: string }; backupConfig?: BackupConfig; monitoringConfig?: MonitoringConfig; + deletionProtection: boolean; } export interface UpdateDatabaseRequest_LabelsEntry { @@ -214,6 +230,162 @@ export interface DeleteDatabaseMetadata { databaseName: string; } +const baseMoveDatabaseRequest: object = { + $type: "yandex.cloud.ydb.v1.MoveDatabaseRequest", + databaseId: "", + destinationFolderId: "", +}; + +export const MoveDatabaseRequest = { + $type: "yandex.cloud.ydb.v1.MoveDatabaseRequest" as const, + + encode( + message: MoveDatabaseRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.databaseId !== "") { + writer.uint32(10).string(message.databaseId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveDatabaseRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveDatabaseRequest } as MoveDatabaseRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.databaseId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveDatabaseRequest { + const message = { ...baseMoveDatabaseRequest } as MoveDatabaseRequest; + message.databaseId = + object.databaseId !== undefined && object.databaseId !== null + ? String(object.databaseId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveDatabaseRequest): unknown { + const obj: any = {}; + message.databaseId !== undefined && (obj.databaseId = message.databaseId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveDatabaseRequest { + const message = { ...baseMoveDatabaseRequest } as MoveDatabaseRequest; + message.databaseId = object.databaseId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveDatabaseRequest.$type, MoveDatabaseRequest); + +const baseMoveDatabaseMetadata: object = { + $type: "yandex.cloud.ydb.v1.MoveDatabaseMetadata", + databaseId: "", + databaseName: "", +}; + +export const MoveDatabaseMetadata = { + $type: "yandex.cloud.ydb.v1.MoveDatabaseMetadata" as const, + + encode( + message: MoveDatabaseMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.databaseId !== "") { + writer.uint32(10).string(message.databaseId); + } + if (message.databaseName !== "") { + writer.uint32(18).string(message.databaseName); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): MoveDatabaseMetadata { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveDatabaseMetadata } as MoveDatabaseMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.databaseId = reader.string(); + break; + case 2: + message.databaseName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveDatabaseMetadata { + const message = { ...baseMoveDatabaseMetadata } as MoveDatabaseMetadata; + message.databaseId = + object.databaseId !== undefined && object.databaseId !== null + ? String(object.databaseId) + : ""; + message.databaseName = + object.databaseName !== undefined && object.databaseName !== null + ? String(object.databaseName) + : ""; + return message; + }, + + toJSON(message: MoveDatabaseMetadata): unknown { + const obj: any = {}; + message.databaseId !== undefined && (obj.databaseId = message.databaseId); + message.databaseName !== undefined && + (obj.databaseName = message.databaseName); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveDatabaseMetadata { + const message = { ...baseMoveDatabaseMetadata } as MoveDatabaseMetadata; + message.databaseId = object.databaseId ?? ""; + message.databaseName = object.databaseName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveDatabaseMetadata.$type, MoveDatabaseMetadata); + const baseRestoreBackupRequest: object = { $type: "yandex.cloud.ydb.v1.RestoreBackupRequest", backupId: "", @@ -1101,6 +1273,7 @@ const baseCreateDatabaseRequest: object = { subnetIds: "", assignPublicIps: false, locationId: "", + deletionProtection: false, }; export const CreateDatabaseRequest = { @@ -1192,6 +1365,9 @@ export const CreateDatabaseRequest = { writer.uint32(138).fork() ).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(144).bool(message.deletionProtection); + } return writer; }, @@ -1276,6 +1452,9 @@ export const CreateDatabaseRequest = { reader.uint32() ); break; + case 18: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1355,6 +1534,11 @@ export const CreateDatabaseRequest = { object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromJSON(object.monitoringConfig) : undefined; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -1413,6 +1597,8 @@ export const CreateDatabaseRequest = { (obj.monitoringConfig = message.monitoringConfig ? MonitoringConfig.toJSON(message.monitoringConfig) : undefined); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -1470,6 +1656,7 @@ export const CreateDatabaseRequest = { object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromPartial(object.monitoringConfig) : undefined; + message.deletionProtection = object.deletionProtection ?? 
false; return message; }, }; @@ -1651,6 +1838,7 @@ const baseUpdateDatabaseRequest: object = { subnetIds: "", assignPublicIps: false, locationId: "", + deletionProtection: false, }; export const UpdateDatabaseRequest = { @@ -1748,6 +1936,9 @@ export const UpdateDatabaseRequest = { writer.uint32(154).fork() ).ldelim(); } + if (message.deletionProtection === true) { + writer.uint32(160).bool(message.deletionProtection); + } return writer; }, @@ -1838,6 +2029,9 @@ export const UpdateDatabaseRequest = { reader.uint32() ); break; + case 20: + message.deletionProtection = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1925,6 +2119,11 @@ export const UpdateDatabaseRequest = { object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromJSON(object.monitoringConfig) : undefined; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; return message; }, @@ -1988,6 +2187,8 @@ export const UpdateDatabaseRequest = { (obj.monitoringConfig = message.monitoringConfig ? MonitoringConfig.toJSON(message.monitoringConfig) : undefined); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); return obj; }, @@ -2050,6 +2251,7 @@ export const UpdateDatabaseRequest = { object.monitoringConfig !== undefined && object.monitoringConfig !== null ? MonitoringConfig.fromPartial(object.monitoringConfig) : undefined; + message.deletionProtection = object.deletionProtection ?? false; return message; }, }; @@ -2438,6 +2640,17 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + move: { + path: "/yandex.cloud.ydb.v1.DatabaseService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveDatabaseRequest) => + Buffer.from(MoveDatabaseRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveDatabaseRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, listAccessBindings: { path: "/yandex.cloud.ydb.v1.DatabaseService/ListAccessBindings", requestStream: false, @@ -2525,6 +2738,7 @@ export interface DatabaseServiceServer extends UntypedServiceImplementation { start: handleUnaryCall; /** Stops the specified database. 
*/ stop: handleUnaryCall; + move: handleUnaryCall; listAccessBindings: handleUnaryCall< ListAccessBindingsRequest, ListAccessBindingsResponse @@ -2644,6 +2858,21 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + move( + request: MoveDatabaseRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveDatabaseRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveDatabaseRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; listAccessBindings( request: ListAccessBindingsRequest, callback: ( diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index d9a57fbf..215d5738 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -8,6 +8,7 @@ interface ServiceEndpoint { type ServiceEndpointsList = ServiceEndpoint[]; +// @see https://api.cloud.yandex.net/endpoints const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ { serviceIds: ['yandex.cloud.operation.OperationService'], @@ -27,6 +28,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.compute.v1.SnapshotService', 'yandex.cloud.compute.v1.ZoneService', 'yandex.cloud.compute.v1.instancegroup.InstanceGroupService', + 'yandex.cloud.compute.v1.SnapshotScheduleService', ], endpoint: 'compute.api.cloud.yandex.net:443', }, @@ -157,6 +159,10 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ serviceIds: ['yandex.cloud.serverless.mdbproxy.v1.ProxyService'], endpoint: 'mdbproxy.api.cloud.yandex.net:443', }, + { + serviceIds: ['yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService'], + endpoint: 'apigateway-connections.api.cloud-preprod.yandex.net:443', + }, { serviceIds: [ 'yandex.cloud.k8s.v1.ClusterService', @@ -194,7 +200,9 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ endpoint: 'ydb.api.cloud.yandex.net:443', }, { - serviceIds: ['yandex.cloud.iot.devices.v1.RegistryService'], + serviceIds: [ + 'yandex.cloud.iot.devices.v1.RegistryService', + ], endpoint: 'iot-devices.api.cloud.yandex.net:443', }, { @@ -205,6 +213,19 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ ], endpoint: 'iot-data.api.cloud.yandex.net:443', }, + { + serviceIds: [ + 'yandex.cloud.iot.broker.v1.BrokerDataService', + 'yandex.cloud.iot.broker.v1.BrokerService', + ], + endpoint: 'iot-broker.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.monitoring.v3.DashboardService', + ], + endpoint: 'monitoring.api.cloud.yandex.net:443', + }, { serviceIds: [ 'yandex.cloud.dataproc.manager.v1.JobService', @@ -324,6 +345,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.organizationmanager.v1.UserService', 'yandex.cloud.organizationmanager.v1.saml.CertificateService', 'yandex.cloud.organizationmanager.v1.saml.FederationService', + 'yandex.cloud.organizationmanager.v1.GroupService', ], endpoint: 'organization-manager.api.cloud.yandex.net:443', }, From 0dcd2db50ca2b50d215050dbb0947106e88fcc7d Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Tue, 6 Sep 2022 11:06:15 +0000 Subject: [PATCH 30/54] chore(release): 2.2.0 [skip ci] # [2.2.0](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.1.1...v2.2.0) (2022-09-06) ### Features * update api accordind to changes in proto 
([9893514](https://github.com/yandex-cloud/nodejs-sdk/commit/9893514ecf38c667b174167a0b215ffe61292a39)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 3ce12742..67781203 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.1.1", + "version": "2.2.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.1.1", + "version": "2.2.0", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index 125fd9fa..d1be52fc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.1.1", + "version": "2.2.0", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From ab327de5d7b6ff3beb55fa2746ac432cf942a2aa Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 15 Sep 2022 18:45:46 +0300 Subject: [PATCH 31/54] fix: apigw connections endpoint --- src/service-endpoints.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 215d5738..0c18d07d 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -161,7 +161,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ }, { serviceIds: ['yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService'], - endpoint: 'apigateway-connections.api.cloud-preprod.yandex.net:443', + endpoint: 'apigateway-connections.api.cloud.yandex.net:443', }, { serviceIds: [ From 1d65549b4c8c24dd8f746022ad56c0452b86603e Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Thu, 15 Sep 2022 16:10:07 +0000 Subject: [PATCH 32/54] chore(release): 2.2.1 [skip ci] ## [2.2.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.2.0...v2.2.1) (2022-09-15) ### Bug Fixes * apigw connections endpoint ([ab327de](https://github.com/yandex-cloud/nodejs-sdk/commit/ab327de5d7b6ff3beb55fa2746ac432cf942a2aa)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 67781203..58a60a92 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.0", + "version": "2.2.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.0", + "version": "2.2.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "1.6.0", diff --git a/package.json b/package.json index d1be52fc..bf820f13 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.0", + "version": "2.2.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 7402f451d4e6bd282573fde5bbfeef13eb5fecf0 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 13 Oct 2022 15:56:15 +0300 Subject: [PATCH 33/54] fix: user versions range for dependencies --- package.json | 74 ++++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/package.json b/package.json index d1be52fc..3f3fd26f 100644 --- a/package.json +++ b/package.json @@ -22,45 +22,45 @@ ], "homepage": "https://github.com/yandex-cloud/nodejs-sdk#readme", "dependencies": { - "@grpc/grpc-js": "1.6.0", - "axios": "0.24.0", - "jsonwebtoken": "8.5.1", - "lodash": "4.17.21", - "log4js": "6.4.0", - 
"long": "5.2.0", - "luxon": "2.2.0", - "nice-grpc": "1.0.6", - "nice-grpc-client-middleware-deadline": "1.0.6", - "protobufjs": "6.11.3", - "utility-types": "3.10.0" + "@grpc/grpc-js": "^1.6.0", + "axios": "^0.24.0", + "jsonwebtoken": "^8.5.1", + "lodash": "^4.17.21", + "log4js": "^6.4.0", + "long": "^5.2.0", + "luxon": "^2.2.0", + "nice-grpc": "^1.0.6", + "nice-grpc-client-middleware-deadline": "^1.0.6", + "protobufjs": "^6.11.3", + "utility-types": "^3.10.0" }, "devDependencies": { - "@commitlint/cli": "15.0.0", - "@commitlint/config-conventional": "15.0.0", - "@semantic-release/git": "10.0.1", - "@types/jest": "27.0.3", - "@types/jsonwebtoken": "8.5.6", - "@types/lodash": "4.14.178", - "@types/luxon": "2.0.8", - "@types/node": "16.11.3", - "@typescript-eslint/eslint-plugin": "5.7.0", - "@typescript-eslint/parser": "5.7.0", - "eslint": "8.4.1", - "eslint-config-airbnb-base": "15.0.0", - "eslint-config-airbnb-typescript": "16.1.0", - "eslint-plugin-import": "2.25.3", - "eslint-plugin-jsx-a11y": "6.5.1", - "eslint-plugin-prefer-arrow-functions": "3.1.4", - "eslint-plugin-unicorn": "39.0.0", - "fast-glob": "3.2.7", - "grpc-tools": "1.11.2", - "husky": "7.0.4", - "jest": "27.4.5", - "semantic-release": "19.0.3", - "ts-jest": "27.1.1", - "ts-node": "10.4.0", - "ts-proto": "1.95.1", - "typescript": "4.5.4" + "@commitlint/cli": "^15.0.0", + "@commitlint/config-conventional": "^15.0.0", + "@semantic-release/git": "^10.0.1", + "@types/jest": "^27.0.3", + "@types/jsonwebtoken": "^8.5.6", + "@types/lodash": "^4.14.178", + "@types/luxon": "^2.0.8", + "@types/node": "^16.11.3", + "@typescript-eslint/eslint-plugin": "^5.7.0", + "@typescript-eslint/parser": "^5.7.0", + "eslint": "^8.4.1", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^16.1.0", + "eslint-plugin-import": "^2.25.3", + "eslint-plugin-jsx-a11y": "^6.5.1", + "eslint-plugin-prefer-arrow-functions": "^3.1.4", + "eslint-plugin-unicorn": "^39.0.0", + "fast-glob": "^3.2.7", + "grpc-tools": "^1.11.2", + "husky": "^7.0.4", + "jest": "^27.4.5", + "semantic-release": "^19.0.3", + "ts-jest": "^27.1.1", + "ts-node": "^10.4.0", + "ts-proto": "^1.95.1", + "typescript": "^4.5.4" }, "scripts": { "test": "jest -c config/jest.ts --passWithNoTests", From a4f2f20d82b217981d656d5ef98884b38d8b5903 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 13 Oct 2022 15:58:08 +0300 Subject: [PATCH 34/54] fix: update @grpc/grpc-js --- package-lock.json | 164 ++++++++++++++++++++++++++++++---------------- package.json | 2 +- 2 files changed, 110 insertions(+), 56 deletions(-) diff --git a/package-lock.json b/package-lock.json index 67781203..defb2b20 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,45 +9,45 @@ "version": "2.2.0", "license": "MIT", "dependencies": { - "@grpc/grpc-js": "1.6.0", - "axios": "0.24.0", - "jsonwebtoken": "8.5.1", - "lodash": "4.17.21", - "log4js": "6.4.0", - "long": "5.2.0", - "luxon": "2.2.0", - "nice-grpc": "1.0.6", - "nice-grpc-client-middleware-deadline": "1.0.6", - "protobufjs": "6.11.3", - "utility-types": "3.10.0" + "@grpc/grpc-js": "^1.6.12", + "axios": "^0.24.0", + "jsonwebtoken": "^8.5.1", + "lodash": "^4.17.21", + "log4js": "^6.4.0", + "long": "^5.2.0", + "luxon": "^2.2.0", + "nice-grpc": "^1.0.6", + "nice-grpc-client-middleware-deadline": "^1.0.6", + "protobufjs": "^6.11.3", + "utility-types": "^3.10.0" }, "devDependencies": { - "@commitlint/cli": "15.0.0", - "@commitlint/config-conventional": "15.0.0", - "@semantic-release/git": "10.0.1", - "@types/jest": "27.0.3", - 
"@types/jsonwebtoken": "8.5.6", - "@types/lodash": "4.14.178", - "@types/luxon": "2.0.8", - "@types/node": "16.11.3", - "@typescript-eslint/eslint-plugin": "5.7.0", - "@typescript-eslint/parser": "5.7.0", - "eslint": "8.4.1", - "eslint-config-airbnb-base": "15.0.0", - "eslint-config-airbnb-typescript": "16.1.0", - "eslint-plugin-import": "2.25.3", - "eslint-plugin-jsx-a11y": "6.5.1", - "eslint-plugin-prefer-arrow-functions": "3.1.4", - "eslint-plugin-unicorn": "39.0.0", - "fast-glob": "3.2.7", - "grpc-tools": "1.11.2", - "husky": "7.0.4", - "jest": "27.4.5", - "semantic-release": "19.0.3", - "ts-jest": "27.1.1", - "ts-node": "10.4.0", - "ts-proto": "1.95.1", - "typescript": "4.5.4" + "@commitlint/cli": "^15.0.0", + "@commitlint/config-conventional": "^15.0.0", + "@semantic-release/git": "^10.0.1", + "@types/jest": "^27.0.3", + "@types/jsonwebtoken": "^8.5.6", + "@types/lodash": "^4.14.178", + "@types/luxon": "^2.0.8", + "@types/node": "^16.11.3", + "@typescript-eslint/eslint-plugin": "^5.7.0", + "@typescript-eslint/parser": "^5.7.0", + "eslint": "^8.4.1", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-airbnb-typescript": "^16.1.0", + "eslint-plugin-import": "^2.25.3", + "eslint-plugin-jsx-a11y": "^6.5.1", + "eslint-plugin-prefer-arrow-functions": "^3.1.4", + "eslint-plugin-unicorn": "^39.0.0", + "fast-glob": "^3.2.7", + "grpc-tools": "^1.11.2", + "husky": "^7.0.4", + "jest": "^27.4.5", + "semantic-release": "^19.0.3", + "ts-jest": "^27.1.1", + "ts-node": "^10.4.0", + "ts-proto": "^1.95.1", + "typescript": "^4.5.4" }, "engines": { "node": ">=12.0.0" @@ -1129,11 +1129,11 @@ } }, "node_modules/@grpc/grpc-js": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.6.0.tgz", - "integrity": "sha512-KwNibKGx1qmAwsrYu75FhUo3+m6GMJoBfdnYZte9YQ2EM3hZ5Ez+8+Q+FAMONtfU0XJGUkGK5S+q4CXSjx5Ahw==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.7.1.tgz", + "integrity": "sha512-GVtMU4oh/TeKkWGzXUEsyZtyvSUIT1z49RtGH1UnEGeL+sLuxKl8QH3KZTlSB329R1sWJmesm5hQ5CxXdYH9dg==", "dependencies": { - "@grpc/proto-loader": "^0.6.4", + "@grpc/proto-loader": "^0.7.0", "@types/node": ">=12.12.47" }, "engines": { @@ -1141,14 +1141,14 @@ } }, "node_modules/@grpc/proto-loader": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.9.tgz", - "integrity": "sha512-UlcCS8VbsU9d3XTXGiEVFonN7hXk+oMXZtoHHG2oSA1/GcDP1q6OUgs20PzHDGizzyi8ufGSUDlk3O2NyY7leg==", + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.3.tgz", + "integrity": "sha512-5dAvoZwna2Py3Ef96Ux9jIkp3iZ62TUsV00p3wVBPNX5K178UbNi8Q7gQVqwXT1Yq9RejIGG9G2IPEo93T6RcA==", "dependencies": { "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", "long": "^4.0.0", - "protobufjs": "^6.10.0", + "protobufjs": "^7.0.0", "yargs": "^16.2.0" }, "bin": { @@ -1163,6 +1163,34 @@ "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, + "node_modules/@grpc/proto-loader/node_modules/protobufjs": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.1.2.tgz", + "integrity": "sha512-4ZPTPkXCdel3+L81yw3dG6+Kq3umdWKh7Dc7GW/CpNk4SX3hK58iPCWeCyhVTDrbkNeKrYNZ7EojM5WDaEWTLQ==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + 
"@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/@grpc/proto-loader/node_modules/protobufjs/node_modules/long": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.0.tgz", + "integrity": "sha512-9RTUNjK60eJbx3uz+TEGF7fUr29ZDxR5QzXcyDpeSfeH28S9ycINflOgOlppit5U+4kNTe83KQnMEerw7GmE8w==" + }, "node_modules/@grpc/proto-loader/node_modules/yargs": { "version": "16.2.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", @@ -6832,7 +6860,7 @@ "node_modules/lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==" }, "node_modules/lodash.capitalize": { "version": "4.2.1", @@ -13060,23 +13088,23 @@ } }, "@grpc/grpc-js": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.6.0.tgz", - "integrity": "sha512-KwNibKGx1qmAwsrYu75FhUo3+m6GMJoBfdnYZte9YQ2EM3hZ5Ez+8+Q+FAMONtfU0XJGUkGK5S+q4CXSjx5Ahw==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.7.1.tgz", + "integrity": "sha512-GVtMU4oh/TeKkWGzXUEsyZtyvSUIT1z49RtGH1UnEGeL+sLuxKl8QH3KZTlSB329R1sWJmesm5hQ5CxXdYH9dg==", "requires": { - "@grpc/proto-loader": "^0.6.4", + "@grpc/proto-loader": "^0.7.0", "@types/node": ">=12.12.47" } }, "@grpc/proto-loader": { - "version": "0.6.9", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.6.9.tgz", - "integrity": "sha512-UlcCS8VbsU9d3XTXGiEVFonN7hXk+oMXZtoHHG2oSA1/GcDP1q6OUgs20PzHDGizzyi8ufGSUDlk3O2NyY7leg==", + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.3.tgz", + "integrity": "sha512-5dAvoZwna2Py3Ef96Ux9jIkp3iZ62TUsV00p3wVBPNX5K178UbNi8Q7gQVqwXT1Yq9RejIGG9G2IPEo93T6RcA==", "requires": { "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", "long": "^4.0.0", - "protobufjs": "^6.10.0", + "protobufjs": "^7.0.0", "yargs": "^16.2.0" }, "dependencies": { @@ -13085,6 +13113,32 @@ "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" }, + "protobufjs": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.1.2.tgz", + "integrity": "sha512-4ZPTPkXCdel3+L81yw3dG6+Kq3umdWKh7Dc7GW/CpNk4SX3hK58iPCWeCyhVTDrbkNeKrYNZ7EojM5WDaEWTLQ==", + "requires": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "dependencies": { + "long": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.0.tgz", + "integrity": "sha512-9RTUNjK60eJbx3uz+TEGF7fUr29ZDxR5QzXcyDpeSfeH28S9ycINflOgOlppit5U+4kNTe83KQnMEerw7GmE8w==" + } + } + }, "yargs": { "version": "16.2.0", "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", @@ -17507,7 +17561,7 @@ "lodash.camelcase": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=" + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==" }, "lodash.capitalize": { "version": "4.2.1", diff --git a/package.json b/package.json index 3f3fd26f..f34d8aa2 100644 --- a/package.json +++ b/package.json @@ -22,7 +22,7 @@ ], "homepage": "https://github.com/yandex-cloud/nodejs-sdk#readme", "dependencies": { - "@grpc/grpc-js": "^1.6.0", + "@grpc/grpc-js": "^1.6.12", "axios": "^0.24.0", "jsonwebtoken": "^8.5.1", "lodash": "^4.17.21", From 2585083ada72e3d6d1de18c3bdda2d9ecf1a56d1 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Thu, 13 Oct 2022 13:05:28 +0000 Subject: [PATCH 35/54] chore(release): 2.2.2 [skip ci] ## [2.2.2](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.2.1...v2.2.2) (2022-10-13) ### Bug Fixes * update @grpc/grpc-js ([a4f2f20](https://github.com/yandex-cloud/nodejs-sdk/commit/a4f2f20d82b217981d656d5ef98884b38d8b5903)) * user versions range for dependencies ([7402f45](https://github.com/yandex-cloud/nodejs-sdk/commit/7402f451d4e6bd282573fde5bbfeef13eb5fecf0)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 9b3fbef1..d02ca84d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.1", + "version": "2.2.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.1", + "version": "2.2.2", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 2bdecb8e..81a44674 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.1", + "version": "2.2.2", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 8e9c7e3df014da740b3529a06e7d1e0e381f6240 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Nov 2022 22:22:55 +0000 Subject: [PATCH 36/54] chore(deps): bump minimatch from 3.0.4 to 3.1.2 Bumps [minimatch](https://github.com/isaacs/minimatch) from 3.0.4 to 3.1.2. - [Release notes](https://github.com/isaacs/minimatch/releases) - [Commits](https://github.com/isaacs/minimatch/compare/v3.0.4...v3.1.2) --- updated-dependencies: - dependency-name: minimatch dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/package-lock.json b/package-lock.json index d02ca84d..f8667610 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7245,9 +7245,9 @@ } }, "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "dependencies": { "brace-expansion": "^1.1.7" @@ -17856,9 +17856,9 @@ "dev": true }, "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, "requires": { "brace-expansion": "^1.1.7" From 8532b1447b5c91615cb5920ec644298d7a9287ba Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Fri, 18 Nov 2022 15:13:15 +0300 Subject: [PATCH 37/54] feat: add ability to provide ssl options for grpc channel --- src/session.ts | 8 ++++---- src/types.ts | 7 +++++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/session.ts b/src/session.ts index 8e94fdfb..4dd758e4 100644 --- a/src/session.ts +++ b/src/session.ts @@ -8,7 +8,7 @@ import { IamTokenCredentialsConfig, OAuthCredentialsConfig, ServiceAccountCredentialsConfig, WrappedServiceClientType, - SessionConfig, + SessionConfig, ChannelSslOptions, } from './types'; import { IamTokenService } from './token-service/iam-token-service'; import { MetadataTokenService } from './token-service/metadata-token-service'; @@ -50,8 +50,8 @@ const newTokenCreator = (config: SessionConfig): () => Promise => { return async () => tokenService.getToken(); }; -const newChannelCredentials = (tokenCreator: TokenCreator) => credentials.combineChannelCredentials( - credentials.createSsl(), +const newChannelCredentials = (tokenCreator: TokenCreator, sslOptions?: ChannelSslOptions) => credentials.combineChannelCredentials( + credentials.createSsl(sslOptions?.rootCerts, sslOptions?.privateKey, sslOptions?.certChain), credentials.createFromMetadataGenerator( ( params: { service_url: string }, @@ -87,7 +87,7 @@ export class Session { ...config, }; this.tokenCreator = newTokenCreator(this.config); - this.channelCredentials = newChannelCredentials(this.tokenCreator); + this.channelCredentials = newChannelCredentials(this.tokenCreator, this.config.ssl); } get pollInterval(): number { diff --git a/src/types.ts b/src/types.ts index 9ed25f58..6fa69b58 100644 --- a/src/types.ts +++ b/src/types.ts @@ -30,8 +30,15 @@ export interface ISslCredentials { clientCertChain?: Buffer; } +export interface ChannelSslOptions { + rootCerts?: Buffer, + privateKey?: Buffer, + certChain?: Buffer, +} + export interface GenericCredentialsConfig { pollInterval?: number; + ssl?: ChannelSslOptions } export interface OAuthCredentialsConfig extends GenericCredentialsConfig { From c82068cec0ea1fd986bea75af35a7b68251fcf58 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Fri, 18 Nov 
2022 12:18:02 +0000 Subject: [PATCH 38/54] chore(release): 2.3.0-beta.1 [skip ci] # [2.3.0-beta.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.2.2...v2.3.0-beta.1) (2022-11-18) ### Features * add ability to provide ssl options for grpc channel ([8532b14](https://github.com/yandex-cloud/nodejs-sdk/commit/8532b1447b5c91615cb5920ec644298d7a9287ba)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index f8667610..bf4fa2a2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.2", + "version": "2.3.0-beta.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.2", + "version": "2.3.0-beta.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 81a44674..fdde56db 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.2.2", + "version": "2.3.0-beta.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From d1d32b87e4a078beb64d2614a724fafc4aa10022 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Fri, 18 Nov 2022 13:58:27 +0000 Subject: [PATCH 39/54] chore(release): 2.3.0 [skip ci] # [2.3.0](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.2.2...v2.3.0) (2022-11-18) ### Features * add ability to provide ssl options for grpc channel ([8532b14](https://github.com/yandex-cloud/nodejs-sdk/commit/8532b1447b5c91615cb5920ec644298d7a9287ba)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index bf4fa2a2..dcb73ec0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.0-beta.1", + "version": "2.3.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.0-beta.1", + "version": "2.3.0", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index fdde56db..5d7ce53b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.0-beta.1", + "version": "2.3.0", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From d320aac22c6f3156cfb121dcdaaaddfb62760613 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 20:09:14 +0000 Subject: [PATCH 40/54] chore(deps): bump jsonwebtoken from 8.5.1 to 9.0.0 Bumps [jsonwebtoken](https://github.com/auth0/node-jsonwebtoken) from 8.5.1 to 9.0.0. - [Release notes](https://github.com/auth0/node-jsonwebtoken/releases) - [Changelog](https://github.com/auth0/node-jsonwebtoken/blob/master/CHANGELOG.md) - [Commits](https://github.com/auth0/node-jsonwebtoken/compare/v8.5.1...v9.0.0) --- updated-dependencies: - dependency-name: jsonwebtoken dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 131 ++++++++++++++-------------------------------- package.json | 2 +- 2 files changed, 40 insertions(+), 93 deletions(-) diff --git a/package-lock.json b/package-lock.json index dcb73ec0..8d97896d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,7 +11,7 @@ "dependencies": { "@grpc/grpc-js": "^1.6.12", "axios": "^0.24.0", - "jsonwebtoken": "^8.5.1", + "jsonwebtoken": "^9.0.0", "lodash": "^4.17.21", "log4js": "^6.4.0", "long": "^5.2.0", @@ -6682,32 +6682,32 @@ } }, "node_modules/jsonwebtoken": { - "version": "8.5.1", - "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz", - "integrity": "sha512-XjwVfRS6jTMsqYs0EsuJ4LGxXV14zQybNd4L2r0UvbVnSF9Af8x7p5MzbJ90Ioz/9TI41/hTCvznF/loiSzn8w==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz", + "integrity": "sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw==", "dependencies": { "jws": "^3.2.2", - "lodash.includes": "^4.3.0", - "lodash.isboolean": "^3.0.3", - "lodash.isinteger": "^4.0.4", - "lodash.isnumber": "^3.0.3", - "lodash.isplainobject": "^4.0.6", - "lodash.isstring": "^4.0.1", - "lodash.once": "^4.0.0", + "lodash": "^4.17.21", "ms": "^2.1.1", - "semver": "^5.6.0" + "semver": "^7.3.8" }, "engines": { - "node": ">=4", - "npm": ">=1.4.28" + "node": ">=12", + "npm": ">=6" } }, "node_modules/jsonwebtoken/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "version": "7.3.8", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", + "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "dependencies": { + "lru-cache": "^6.0.0" + }, "bin": { - "semver": "bin/semver" + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/jsx-ast-utils": { @@ -6880,41 +6880,23 @@ "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", "dev": true }, - "node_modules/lodash.includes": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", - "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=" - }, - "node_modules/lodash.isboolean": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", - "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=" - }, - "node_modules/lodash.isinteger": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", - "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=" - }, "node_modules/lodash.ismatch": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/lodash.ismatch/-/lodash.ismatch-4.4.0.tgz", "integrity": "sha1-dWy1FQyjum8RCFp4hJZF8Yj4Xzc=", "dev": true }, - "node_modules/lodash.isnumber": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", - "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=" - }, "node_modules/lodash.isplainobject": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", - "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=", + "dev": true }, "node_modules/lodash.isstring": { "version": "4.0.1", "resolved": 
"https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", - "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=", + "dev": true }, "node_modules/lodash.memoize": { "version": "4.1.2", @@ -6928,11 +6910,6 @@ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true }, - "node_modules/lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=" - }, "node_modules/lodash.uniqby": { "version": "4.7.0", "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", @@ -6963,7 +6940,6 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, "dependencies": { "yallist": "^4.0.0" }, @@ -12216,8 +12192,7 @@ "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yaml": { "version": "1.10.2", @@ -17413,26 +17388,23 @@ } }, "jsonwebtoken": { - "version": "8.5.1", - "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz", - "integrity": "sha512-XjwVfRS6jTMsqYs0EsuJ4LGxXV14zQybNd4L2r0UvbVnSF9Af8x7p5MzbJ90Ioz/9TI41/hTCvznF/loiSzn8w==", + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz", + "integrity": "sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw==", "requires": { "jws": "^3.2.2", - "lodash.includes": "^4.3.0", - "lodash.isboolean": "^3.0.3", - "lodash.isinteger": "^4.0.4", - "lodash.isnumber": "^3.0.3", - "lodash.isplainobject": "^4.0.6", - "lodash.isstring": "^4.0.1", - "lodash.once": "^4.0.0", + "lodash": "^4.17.21", "ms": "^2.1.1", - "semver": "^5.6.0" + "semver": "^7.3.8" }, "dependencies": { "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + "version": "7.3.8", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", + "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "requires": { + "lru-cache": "^6.0.0" + } } } }, @@ -17581,41 +17553,23 @@ "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", "dev": true }, - "lodash.includes": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", - "integrity": "sha1-YLuYqHy5I8aMoeUTJUgzFISfVT8=" - }, - "lodash.isboolean": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", - "integrity": "sha1-bC4XHbKiV82WgC/UOwGyDV9YcPY=" - }, - "lodash.isinteger": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", - "integrity": "sha1-YZwK89A/iwTDH1iChAt3sRzWg0M=" - }, "lodash.ismatch": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/lodash.ismatch/-/lodash.ismatch-4.4.0.tgz", "integrity": 
"sha1-dWy1FQyjum8RCFp4hJZF8Yj4Xzc=", "dev": true }, - "lodash.isnumber": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", - "integrity": "sha1-POdoEMWSjQM1IwGsKHMX8RwLH/w=" - }, "lodash.isplainobject": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", - "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=", + "dev": true }, "lodash.isstring": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", - "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=" + "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE=", + "dev": true }, "lodash.memoize": { "version": "4.1.2", @@ -17629,11 +17583,6 @@ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true }, - "lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=" - }, "lodash.uniqby": { "version": "4.7.0", "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", @@ -17661,7 +17610,6 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, "requires": { "yallist": "^4.0.0" } @@ -21491,8 +21439,7 @@ "yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "yaml": { "version": "1.10.2", diff --git a/package.json b/package.json index 5d7ce53b..b852941e 100644 --- a/package.json +++ b/package.json @@ -24,7 +24,7 @@ "dependencies": { "@grpc/grpc-js": "^1.6.12", "axios": "^0.24.0", - "jsonwebtoken": "^8.5.1", + "jsonwebtoken": "^9.0.0", "lodash": "^4.17.21", "log4js": "^6.4.0", "long": "^5.2.0", From f155e8cff9aa75f560bf9c0945992dcd23b7011a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Jan 2023 16:22:33 +0000 Subject: [PATCH 41/54] chore(deps): bump json5 from 1.0.1 to 1.0.2 Bumps [json5](https://github.com/json5/json5) from 1.0.1 to 1.0.2. - [Release notes](https://github.com/json5/json5/releases) - [Changelog](https://github.com/json5/json5/blob/main/CHANGELOG.md) - [Commits](https://github.com/json5/json5/compare/v1.0.1...v1.0.2) --- updated-dependencies: - dependency-name: json5 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- package-lock.json | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/package-lock.json b/package-lock.json index 8d97896d..b7fddca8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -6623,13 +6623,10 @@ "dev": true }, "node_modules/json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, "bin": { "json5": "lib/cli.js" }, @@ -11770,9 +11767,9 @@ } }, "node_modules/tsconfig-paths/node_modules/json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", "dev": true, "dependencies": { "minimist": "^1.2.0" @@ -17347,13 +17344,10 @@ "dev": true }, "json5": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz", - "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==", - "dev": true, - "requires": { - "minimist": "^1.2.5" - } + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true }, "jsonfile": { "version": "6.1.0", @@ -21109,9 +21103,9 @@ }, "dependencies": { "json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", + "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", "dev": true, "requires": { "minimist": "^1.2.0" From 5f0ce29e7537b0ddfa71e4daf33f2256b34ea5b1 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Mon, 9 Jan 2023 13:45:48 +0300 Subject: [PATCH 42/54] ci: add dependabot config --- .github/dependabot.yml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..478d4522 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: + - package-ecosystem: "npm" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: "fix(deps):" From aaafb5063fa348dc0941fe9e383b30d6aea620bc Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Mon, 9 Jan 2023 13:56:45 +0300 Subject: [PATCH 43/54] ci(dependabot): security updates only --- .github/dependabot.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 478d4522..de85544a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,3 +6,4 @@ updates: interval: "weekly" commit-message: prefix: "fix(deps):" + 
open-pull-requests-limit: 0 From 6b8e171c995e1765294588b497eec1efd4514527 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jan 2023 10:59:45 +0000 Subject: [PATCH 44/54] fix(deps): bump minimist from 1.2.5 to 1.2.7 Bumps [minimist](https://github.com/minimistjs/minimist) from 1.2.5 to 1.2.7. - [Release notes](https://github.com/minimistjs/minimist/releases) - [Changelog](https://github.com/minimistjs/minimist/blob/main/CHANGELOG.md) - [Commits](https://github.com/minimistjs/minimist/compare/v1.2.5...v1.2.7) --- updated-dependencies: - dependency-name: minimist dependency-type: indirect ... Signed-off-by: dependabot[bot] --- package-lock.json | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/package-lock.json b/package-lock.json index b7fddca8..d2a3ec21 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7230,10 +7230,13 @@ } }, "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/minimist-options": { "version": "4.1.0", @@ -17807,9 +17810,9 @@ } }, "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", "dev": true }, "minimist-options": { From c35141927512e9424e38b92434d00b8c2cc54d46 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Mon, 9 Jan 2023 11:13:50 +0000 Subject: [PATCH 45/54] chore(release): 2.3.1 [skip ci] ## [2.3.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.3.0...v2.3.1) (2023-01-09) ### Bug Fixes * **deps:** bump minimist from 1.2.5 to 1.2.7 ([6b8e171](https://github.com/yandex-cloud/nodejs-sdk/commit/6b8e171c995e1765294588b497eec1efd4514527)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index d2a3ec21..2fbd3243 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.0", + "version": "2.3.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.0", + "version": "2.3.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index b852941e..7485da1b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.0", + "version": "2.3.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From fa604556dd15f42cdda0045dbd6776ce12328e17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jan 2023 14:26:20 +0000 Subject: [PATCH 46/54] fix(deps): bump luxon from 2.2.0 to 2.5.2 
Bumps [luxon](https://github.com/moment/luxon) from 2.2.0 to 2.5.2. - [Release notes](https://github.com/moment/luxon/releases) - [Changelog](https://github.com/moment/luxon/blob/master/CHANGELOG.md) - [Commits](https://github.com/moment/luxon/compare/2.2.0...2.5.2) --- updated-dependencies: - dependency-name: luxon dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2fbd3243..e0406079 100644 --- a/package-lock.json +++ b/package-lock.json @@ -6945,9 +6945,9 @@ } }, "node_modules/luxon": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.2.0.tgz", - "integrity": "sha512-LwmknessH4jVIseCsizUgveIHwlLv/RQZWC2uDSMfGJs7w8faPUi2JFxfyfMcTPrpNbChTem3Uz6IKRtn+LcIA==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.5.2.tgz", + "integrity": "sha512-Yg7/RDp4nedqmLgyH0LwgGRvMEKVzKbUdkBYyCosbHgJ+kaOUx0qzSiSatVc3DFygnirTPYnMM2P5dg2uH1WvA==", "engines": { "node": ">=12" } @@ -17612,9 +17612,9 @@ } }, "luxon": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.2.0.tgz", - "integrity": "sha512-LwmknessH4jVIseCsizUgveIHwlLv/RQZWC2uDSMfGJs7w8faPUi2JFxfyfMcTPrpNbChTem3Uz6IKRtn+LcIA==" + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.5.2.tgz", + "integrity": "sha512-Yg7/RDp4nedqmLgyH0LwgGRvMEKVzKbUdkBYyCosbHgJ+kaOUx0qzSiSatVc3DFygnirTPYnMM2P5dg2uH1WvA==" }, "make-dir": { "version": "3.1.0", From 74e054fae6722588f8b5deb3a77ae702a49dd791 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Mon, 9 Jan 2023 19:03:57 +0000 Subject: [PATCH 47/54] chore(release): 2.3.2 [skip ci] ## [2.3.2](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.3.1...v2.3.2) (2023-01-09) ### Bug Fixes * **deps:** bump luxon from 2.2.0 to 2.5.2 ([fa60455](https://github.com/yandex-cloud/nodejs-sdk/commit/fa604556dd15f42cdda0045dbd6776ce12328e17)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index e0406079..41719282 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.1", + "version": "2.3.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.1", + "version": "2.3.2", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 7485da1b..eb12599b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.1", + "version": "2.3.2", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 8d1d280655f77d4fe9cee13036ab0ef8282174c5 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 13 Feb 2023 15:39:57 +0100 Subject: [PATCH 48/54] feat: add retry middleware & configure retries in waitForOperation utility function --- package-lock.json | 61 ++++++++++++++++++++++++++++----- package.json | 1 + src/types.ts | 14 +++++--- src/utils/client-factory.ts | 5 ++- src/utils/operation/wait-for.ts | 11 ++++-- 5 files changed, 77 insertions(+), 15 deletions(-) diff --git a/package-lock.json b/package-lock.json index 41719282..1cb511ba 100644 --- a/package-lock.json +++ b/package-lock.json @@ -18,6 +18,7 @@ "luxon": "^2.2.0", "nice-grpc": "^1.0.6", "nice-grpc-client-middleware-deadline": "^1.0.6", + 
"nice-grpc-client-middleware-retry": "^1.1.2", "protobufjs": "^6.11.3", "utility-types": "^3.10.0" }, @@ -7352,14 +7353,30 @@ "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" }, - "node_modules/nice-grpc-common": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.4.tgz", - "integrity": "sha512-cpKGONNYqi2XP+5z4B4bzhLNrJu5lPbIScM0sqsht6sG9TgdN7ws3qCH82Fht94CfOifL6pQlvkgnEJp5nl2cQ==", + "node_modules/nice-grpc-client-middleware-retry": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-retry/-/nice-grpc-client-middleware-retry-1.1.2.tgz", + "integrity": "sha512-7RWHpQxQ6Dq++TTaP/S/GFd4MuSd1jjeQr8q41oQ6phyPRTZmNx8gdbVQg2Q70iATFjJ9eQfj4/QysfeQ3LURQ==", "dependencies": { + "abort-controller-x": "^0.2.6", + "nice-grpc-common": "^1.1.0", "node-abort-controller": "^2.0.0" } }, + "node_modules/nice-grpc-client-middleware-retry/node_modules/node-abort-controller": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", + "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" + }, + "node_modules/nice-grpc-common": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.1.0.tgz", + "integrity": "sha512-klxJ/lxMyH1KkT02woMBWL3A7BQSTH5jGodJIpNbbv7WKeFBWJaEtT6p7kZJBhGYXtSsQ+TyMU1EJR9BH14YfQ==", + "dependencies": { + "node-abort-controller": "^2.0.0", + "ts-error": "^1.0.6" + } + }, "node_modules/nice-grpc-common/node_modules/node-abort-controller": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", @@ -11613,6 +11630,11 @@ "node": ">=8" } }, + "node_modules/ts-error": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/ts-error/-/ts-error-1.0.6.tgz", + "integrity": "sha512-tLJxacIQUM82IR7JO1UUkKlYuUTmoY9HBJAmNWFzheSlDS5SPMcNIepejHJa4BpPQLAcbRhRf3GDJzyj6rbKvA==" + }, "node_modules/ts-jest": { "version": "27.1.1", "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-27.1.1.tgz", @@ -17913,11 +17935,13 @@ } } }, - "nice-grpc-common": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.0.4.tgz", - "integrity": "sha512-cpKGONNYqi2XP+5z4B4bzhLNrJu5lPbIScM0sqsht6sG9TgdN7ws3qCH82Fht94CfOifL6pQlvkgnEJp5nl2cQ==", + "nice-grpc-client-middleware-retry": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-retry/-/nice-grpc-client-middleware-retry-1.1.2.tgz", + "integrity": "sha512-7RWHpQxQ6Dq++TTaP/S/GFd4MuSd1jjeQr8q41oQ6phyPRTZmNx8gdbVQg2Q70iATFjJ9eQfj4/QysfeQ3LURQ==", "requires": { + "abort-controller-x": "^0.2.6", + "nice-grpc-common": "^1.1.0", "node-abort-controller": "^2.0.0" }, "dependencies": { @@ -17928,6 +17952,22 @@ } } }, + "nice-grpc-common": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.1.0.tgz", + "integrity": "sha512-klxJ/lxMyH1KkT02woMBWL3A7BQSTH5jGodJIpNbbv7WKeFBWJaEtT6p7kZJBhGYXtSsQ+TyMU1EJR9BH14YfQ==", + "requires": { + "node-abort-controller": "^2.0.0", + "ts-error": "^1.0.6" + }, + "dependencies": { + "node-abort-controller": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", + "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" + } + } + }, "node-abort-controller": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-1.2.1.tgz", @@ -20998,6 +21038,11 @@ "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", "dev": true }, + "ts-error": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/ts-error/-/ts-error-1.0.6.tgz", + "integrity": "sha512-tLJxacIQUM82IR7JO1UUkKlYuUTmoY9HBJAmNWFzheSlDS5SPMcNIepejHJa4BpPQLAcbRhRf3GDJzyj6rbKvA==" + }, "ts-jest": { "version": "27.1.1", "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-27.1.1.tgz", diff --git a/package.json b/package.json index eb12599b..1f40be95 100644 --- a/package.json +++ b/package.json @@ -31,6 +31,7 @@ "luxon": "^2.2.0", "nice-grpc": "^1.0.6", "nice-grpc-client-middleware-deadline": "^1.0.6", + "nice-grpc-client-middleware-retry": "^1.1.2", "protobufjs": "^6.11.3", "utility-types": "^3.10.0" }, diff --git a/src/types.ts b/src/types.ts index 6fa69b58..c78baae0 100644 --- a/src/types.ts +++ b/src/types.ts @@ -1,21 +1,26 @@ import { - ChannelCredentials, ChannelOptions, Client, ServiceDefinition, + ChannelCredentials, + ChannelOptions, + Client, + ServiceDefinition, } from '@grpc/grpc-js'; import { RawClient } from 'nice-grpc'; -import { NormalizedServiceDefinition } from 'nice-grpc/lib/service-definitions'; import { DeadlineOptions } from 'nice-grpc-client-middleware-deadline'; +import { RetryOptions } from 'nice-grpc-client-middleware-retry'; +import { NormalizedServiceDefinition } from 'nice-grpc/lib/service-definitions'; export interface TokenService { getToken: () => Promise; } export interface GeneratedServiceClientCtor { + service: T + new( address: string, credentials: ChannelCredentials, options?: Partial, ): Client; - service: T } export interface IIAmCredentials { @@ -59,4 +64,5 @@ export type SessionConfig = | ServiceAccountCredentialsConfig | GenericCredentialsConfig; -export type WrappedServiceClientType = RawClient, DeadlineOptions>; +// eslint-disable-next-line max-len +export type WrappedServiceClientType = RawClient, DeadlineOptions & RetryOptions>; diff --git a/src/utils/client-factory.ts b/src/utils/client-factory.ts index 86a1d26c..57e4cc96 100644 --- a/src/utils/client-factory.ts +++ b/src/utils/client-factory.ts @@ -1,4 +1,7 @@ import { createClientFactory } from 'nice-grpc'; import { deadlineMiddleware } from 'nice-grpc-client-middleware-deadline'; +import { retryMiddleware } from 'nice-grpc-client-middleware-retry'; -export const clientFactory = createClientFactory().use(deadlineMiddleware); +export const clientFactory = createClientFactory() + .use(retryMiddleware) + .use(deadlineMiddleware); diff --git a/src/utils/operation/wait-for.ts b/src/utils/operation/wait-for.ts index bafdc384..fb9ad558 100644 --- a/src/utils/operation/wait-for.ts +++ b/src/utils/operation/wait-for.ts @@ -1,6 +1,9 @@ -import { Session } from '../../session'; +import { + cloudApi, + serviceClients, +} from '../..'; import { Operation } from '../../generated/yandex/cloud/operation/operation'; -import { serviceClients, cloudApi } from '../..'; +import { Session } from '../../session'; const { operation: { operation_service: { GetOperationRequest } } } = cloudApi; @@ -18,6 +21,10 @@ export const waitForOperation = (op: Operation, session: 
Session, timeoutMs: num GetOperationRequest.fromPartial({ operationId: op.id, }), + { + retry: true, + retryMaxAttempts: 3, + }, ); checksCount++; From c8c3af8a1a5854782bc8229fdc15b71b1eb3ad47 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Mon, 13 Feb 2023 17:54:17 +0000 Subject: [PATCH 49/54] chore(release): 2.4.0 [skip ci] # [2.4.0](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.3.2...v2.4.0) (2023-02-13) ### Features * add retry middleware & configure retries in waitForOperation utility function ([8d1d280](https://github.com/yandex-cloud/nodejs-sdk/commit/8d1d280655f77d4fe9cee13036ab0ef8282174c5)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1cb511ba..a8c64ca4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.2", + "version": "2.4.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.2", + "version": "2.4.0", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 1f40be95..7dca7280 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.3.2", + "version": "2.4.0", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 66b45a3d1acbc83a6a6752c54c96e6eeed816f86 Mon Sep 17 00:00:00 2001 From: Nikolay Matrosov Date: Mon, 13 Feb 2023 21:28:18 +0100 Subject: [PATCH 50/54] fix(retry): copy retry middleware to SDK and fix it --- package-lock.json | 89 +++++++++++-------------- package.json | 3 +- src/middleware/retry.ts | 129 ++++++++++++++++++++++++++++++++++++ src/types.ts | 2 +- src/utils/client-factory.ts | 2 +- 5 files changed, 173 insertions(+), 52 deletions(-) create mode 100644 src/middleware/retry.ts diff --git a/package-lock.json b/package-lock.json index a8c64ca4..a9065e2b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,6 +10,7 @@ "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", + "abort-controller-x": "^0.4.1", "axios": "^0.24.0", "jsonwebtoken": "^9.0.0", "lodash": "^4.17.21", @@ -18,7 +19,7 @@ "luxon": "^2.2.0", "nice-grpc": "^1.0.6", "nice-grpc-client-middleware-deadline": "^1.0.6", - "nice-grpc-client-middleware-retry": "^1.1.2", + "node-abort-controller": "^3.1.1", "protobufjs": "^6.11.3", "utility-types": "^3.10.0" }, @@ -2302,12 +2303,9 @@ "dev": true }, "node_modules/abort-controller-x": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.2.6.tgz", - "integrity": "sha512-U8MmmcfIzl7qnzoog1woxKX/eYkQin3WR7k/S2dtpGLlSlsndXnvOYQEq8y1VnHC3+ofNFAT0GRgHq1lBbXlDQ==", - "dependencies": { - "node-abort-controller": "^1.2.1 || ^2.0.0" - } + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.4.1.tgz", + "integrity": "sha512-lJ2ssrl3FoTK3cX/g15lRCkXFWKiwRTRtBjfwounO2EM/Q65rI/MEZsfsch1juWU2pH2aLSaq0HGowlDP/imrw==" }, "node_modules/acorn": { "version": "8.6.0", @@ -7353,21 +7351,6 @@ "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" }, - "node_modules/nice-grpc-client-middleware-retry": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/nice-grpc-client-middleware-retry/-/nice-grpc-client-middleware-retry-1.1.2.tgz", - "integrity": "sha512-7RWHpQxQ6Dq++TTaP/S/GFd4MuSd1jjeQr8q41oQ6phyPRTZmNx8gdbVQg2Q70iATFjJ9eQfj4/QysfeQ3LURQ==", - "dependencies": { - "abort-controller-x": "^0.2.6", - "nice-grpc-common": "^1.1.0", - "node-abort-controller": "^2.0.0" - } - }, - "node_modules/nice-grpc-client-middleware-retry/node_modules/node-abort-controller": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", - "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" - }, "node_modules/nice-grpc-common": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.1.0.tgz", @@ -7382,11 +7365,24 @@ "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" }, - "node_modules/node-abort-controller": { + "node_modules/nice-grpc/node_modules/abort-controller-x": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.2.7.tgz", + "integrity": "sha512-hq/lt8yODKrwuZa69GhSTl2l2kcrus2khZ7OjD6Bmqmx6tbW6dnV8cVGnkkdLCWnjXpgSx8zjQo+HUc9mvoQ/w==", + "dependencies": { + "node-abort-controller": "^1.2.1 || ^2.0.0" + } + }, + "node_modules/nice-grpc/node_modules/node-abort-controller": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-1.2.1.tgz", "integrity": "sha512-79PYeJuj6S9+yOHirR0JBLFOgjB6sQCir10uN6xRx25iD+ZD4ULqgRn3MwWBRaQGB0vEgReJzWwJo42T1R6YbQ==" }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==" + }, "node_modules/node-emoji": { "version": "1.11.0", "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", @@ -14053,12 +14049,9 @@ "dev": true }, "abort-controller-x": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.2.6.tgz", - "integrity": "sha512-U8MmmcfIzl7qnzoog1woxKX/eYkQin3WR7k/S2dtpGLlSlsndXnvOYQEq8y1VnHC3+ofNFAT0GRgHq1lBbXlDQ==", - "requires": { - "node-abort-controller": "^1.2.1 || ^2.0.0" - } + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.4.1.tgz", + "integrity": "sha512-lJ2ssrl3FoTK3cX/g15lRCkXFWKiwRTRtBjfwounO2EM/Q65rI/MEZsfsch1juWU2pH2aLSaq0HGowlDP/imrw==" }, "acorn": { "version": "8.6.0", @@ -17917,6 +17910,21 @@ "abort-controller-x": "^0.2.4", "nice-grpc-common": "^1.0.4", "node-abort-controller": "^1.2.1" + }, + "dependencies": { + "abort-controller-x": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/abort-controller-x/-/abort-controller-x-0.2.7.tgz", + "integrity": "sha512-hq/lt8yODKrwuZa69GhSTl2l2kcrus2khZ7OjD6Bmqmx6tbW6dnV8cVGnkkdLCWnjXpgSx8zjQo+HUc9mvoQ/w==", + "requires": { + "node-abort-controller": "^1.2.1 || ^2.0.0" + } + }, + "node-abort-controller": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-1.2.1.tgz", + "integrity": "sha512-79PYeJuj6S9+yOHirR0JBLFOgjB6sQCir10uN6xRx25iD+ZD4ULqgRn3MwWBRaQGB0vEgReJzWwJo42T1R6YbQ==" + } } }, 
"nice-grpc-client-middleware-deadline": { @@ -17935,23 +17943,6 @@ } } }, - "nice-grpc-client-middleware-retry": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/nice-grpc-client-middleware-retry/-/nice-grpc-client-middleware-retry-1.1.2.tgz", - "integrity": "sha512-7RWHpQxQ6Dq++TTaP/S/GFd4MuSd1jjeQr8q41oQ6phyPRTZmNx8gdbVQg2Q70iATFjJ9eQfj4/QysfeQ3LURQ==", - "requires": { - "abort-controller-x": "^0.2.6", - "nice-grpc-common": "^1.1.0", - "node-abort-controller": "^2.0.0" - }, - "dependencies": { - "node-abort-controller": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-2.0.0.tgz", - "integrity": "sha512-L8RfEgjBTHAISTuagw51PprVAqNZoG6KSB6LQ6H1bskMVkFs5E71IyjauLBv3XbuomJlguWF/VnRHdJ1gqiAqA==" - } - } - }, "nice-grpc-common": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/nice-grpc-common/-/nice-grpc-common-1.1.0.tgz", @@ -17969,9 +17960,9 @@ } }, "node-abort-controller": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-1.2.1.tgz", - "integrity": "sha512-79PYeJuj6S9+yOHirR0JBLFOgjB6sQCir10uN6xRx25iD+ZD4ULqgRn3MwWBRaQGB0vEgReJzWwJo42T1R6YbQ==" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==" }, "node-emoji": { "version": "1.11.0", diff --git a/package.json b/package.json index 7dca7280..f85aef3f 100644 --- a/package.json +++ b/package.json @@ -31,7 +31,8 @@ "luxon": "^2.2.0", "nice-grpc": "^1.0.6", "nice-grpc-client-middleware-deadline": "^1.0.6", - "nice-grpc-client-middleware-retry": "^1.1.2", + "abort-controller-x": "^0.4.1", + "node-abort-controller": "^3.1.1", "protobufjs": "^6.11.3", "utility-types": "^3.10.0" }, diff --git a/src/middleware/retry.ts b/src/middleware/retry.ts new file mode 100644 index 00000000..58e8f32b --- /dev/null +++ b/src/middleware/retry.ts @@ -0,0 +1,129 @@ +import { + delay, + rethrowAbortError, +} from 'abort-controller-x'; +import { + ClientError, + ClientMiddleware, + Status, +} from 'nice-grpc'; +import { AbortController } from 'node-abort-controller'; + +/** + * These options are added to `CallOptions` by + * `nice-grpc-client-middleware-retry`. + */ +export type RetryOptions = { + /** + * Boolean indicating whether retries are enabled. + * + * If the method is marked as idempotent in Protobuf, i.e. has + * + * option idempotency_level = IDEMPOTENT; + * + * then the default is `true`. Otherwise, the default is `false`. + * + * Method options currently work only when compiling with `ts-proto`. + */ + retry?: boolean; + /** + * Base delay between retry attempts in milliseconds. + * + * Defaults to 1000. + * + * Example: if `retryBaseDelayMs` is 100, then retries will be attempted in + * 100ms, 200ms, 400ms etc. (not counting jitter). + */ + retryBaseDelayMs?: number; + /** + * Maximum delay between attempts in milliseconds. + * + * Defaults to 15 seconds. + * + * Example: if `retryBaseDelayMs` is 1000 and `retryMaxDelayMs` is 3000, then + * retries will be attempted in 1000ms, 2000ms, 3000ms, 3000ms etc (not + * counting jitter). + */ + retryMaxDelayMs?: number; + /** + * Maximum for the total number of attempts. `Infinity` is supported. + * + * Defaults to 1, i.e. a single retry will be attempted. + */ + retryMaxAttempts?: number; + /** + * Array of retryable status codes. 
+ * + * Default is `[UNKNOWN, RESOURCE_EXHAUSTED, INTERNAL, UNAVAILABLE]`. + */ + retryableStatuses?: Status[]; + /** + * Called after receiving error with retryable status code before setting + * backoff delay timer. + * + * If the error code is not retryable, or the maximum attempts exceeded, this + * function will not be called and the error will be thrown from the client + * method. + */ + onRetryableError?(error: ClientError, attempt: number, delayMs: number): void; +}; + +const defaultRetryableStatuses: Status[] = [ + Status.UNKNOWN, + Status.RESOURCE_EXHAUSTED, + Status.INTERNAL, + Status.UNAVAILABLE, +]; + +/** + * Client middleware that adds automatic retries to unary calls. + */ +export const retryMiddleware: ClientMiddleware = async function* retryMiddleware(call, options) { + const { idempotencyLevel } = call.method.options ?? {}; + const isIdempotent = idempotencyLevel === 'IDEMPOTENT' + || idempotencyLevel === 'NO_SIDE_EFFECTS'; + + const { + retry = isIdempotent, + retryBaseDelayMs = 1000, + retryMaxDelayMs = 15_000, + retryMaxAttempts = 1, + onRetryableError, + retryableStatuses = defaultRetryableStatuses, + ...restOptions + } = options; + + if (call.requestStream || call.responseStream || !retry) { + return yield* call.next(call.request, restOptions); + } + + const signal = options.signal ?? new AbortController().signal; + + for (let attempt = 0; ; attempt++) { + try { + return yield* call.next(call.request, restOptions); + } catch (error: unknown) { + rethrowAbortError(error); + + if ( + attempt >= retryMaxAttempts + || !(error instanceof ClientError) + || !retryableStatuses.includes(error.code) + ) { + throw error; + } + + // https://aws.amazon.com/ru/blogs/architecture/exponential-backoff-and-jitter/ + const backoff = Math.min( + retryMaxDelayMs, + 2 ** attempt * retryBaseDelayMs, + ); + const delayMs = Math.round((backoff * (1 + Math.random())) / 2); + + onRetryableError?.(error, attempt, delayMs); + + // eslint-disable-next-line no-await-in-loop + await delay(signal, delayMs); + } + } +}; diff --git a/src/types.ts b/src/types.ts index c78baae0..18bda45f 100644 --- a/src/types.ts +++ b/src/types.ts @@ -6,8 +6,8 @@ import { } from '@grpc/grpc-js'; import { RawClient } from 'nice-grpc'; import { DeadlineOptions } from 'nice-grpc-client-middleware-deadline'; -import { RetryOptions } from 'nice-grpc-client-middleware-retry'; import { NormalizedServiceDefinition } from 'nice-grpc/lib/service-definitions'; +import { RetryOptions } from './middleware/retry'; export interface TokenService { getToken: () => Promise; diff --git a/src/utils/client-factory.ts b/src/utils/client-factory.ts index 57e4cc96..e74b0824 100644 --- a/src/utils/client-factory.ts +++ b/src/utils/client-factory.ts @@ -1,6 +1,6 @@ import { createClientFactory } from 'nice-grpc'; import { deadlineMiddleware } from 'nice-grpc-client-middleware-deadline'; -import { retryMiddleware } from 'nice-grpc-client-middleware-retry'; +import { retryMiddleware } from '../middleware/retry'; export const clientFactory = createClientFactory() .use(retryMiddleware) From 41e38e4c0e11176a1cf630b7d57991891326cfb6 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Tue, 14 Feb 2023 07:59:25 +0000 Subject: [PATCH 51/54] chore(release): 2.4.1 [skip ci] ## [2.4.1](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.4.0...v2.4.1) (2023-02-14) ### Bug Fixes * **retry:** copy retry middleware to SDK and fix it ([66b45a3](https://github.com/yandex-cloud/nodejs-sdk/commit/66b45a3d1acbc83a6a6752c54c96e6eeed816f86)) --- 
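Usage sketch for the retry options introduced above (notes only; assumes the OperationService client's `get` method as used by `waitForOperation`, a placeholder IAM token from the environment, and illustrative option values rather than recommendations):

```ts
import { Session, serviceClients, cloudApi } from '@yandex-cloud/nodejs-sdk';

const { operation: { operation_service: { GetOperationRequest } } } = cloudApi;

async function getOperationWithRetries(operationId: string) {
    // Placeholder credentials: any SessionConfig accepted by the SDK would work here.
    const session = new Session({ iamToken: process.env.YC_IAM_TOKEN ?? '' });
    const client = session.client(serviceClients.OperationServiceClient);

    // Retry options are ordinary call options; the retry middleware reads and strips them.
    return client.get(
        GetOperationRequest.fromPartial({ operationId }),
        {
            retry: true,            // enable retries even if the method is not marked idempotent
            retryMaxAttempts: 5,    // allow up to 5 retries after the initial attempt
            retryBaseDelayMs: 500,  // exponential backoff starting around 500 ms, with jitter
            onRetryableError: (error, attempt, delayMs) => {
                console.warn(`attempt ${attempt} failed (${error.message}), retrying in ${delayMs} ms`);
            },
        },
    );
}
```

Since `retryableStatuses` is omitted, the default set (UNKNOWN, RESOURCE_EXHAUSTED, INTERNAL, UNAVAILABLE) applies.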
package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index a9065e2b..f2501683 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.0", + "version": "2.4.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.0", + "version": "2.4.1", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index f85aef3f..80df07ef 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.0", + "version": "2.4.1", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From 20091c95992761c73ebb6b8716c360d269f20b03 Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Thu, 9 Mar 2023 16:59:06 +0300 Subject: [PATCH 52/54] fix: allow to override endpoint for OperationService --- src/utils/operation/wait-for.ts | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/utils/operation/wait-for.ts b/src/utils/operation/wait-for.ts index fb9ad558..fef09752 100644 --- a/src/utils/operation/wait-for.ts +++ b/src/utils/operation/wait-for.ts @@ -9,8 +9,13 @@ const { operation: { operation_service: { GetOperationRequest } } } = cloudApi; const DEFAULT_TIMEOUT_MS = 10 * 60 * 1000; // 10 minutes -export const waitForOperation = (op: Operation, session: Session, timeoutMs: number = DEFAULT_TIMEOUT_MS): Promise => { - const client = session.client(serviceClients.OperationServiceClient); +export const waitForOperation = ( + op: Operation, + session: Session, + timeoutMs: number = DEFAULT_TIMEOUT_MS, + operationServiceEndpoint?: string, +): Promise => { + const client = session.client(serviceClients.OperationServiceClient, operationServiceEndpoint); const maxChecksCount = Math.ceil(timeoutMs / session.pollInterval); let checksCount = 0; From ff2f8f00e720a4e2a4b1d7f01febedf8c25d8553 Mon Sep 17 00:00:00 2001 From: yandex-cloud-bot Date: Thu, 9 Mar 2023 14:04:02 +0000 Subject: [PATCH 53/54] chore(release): 2.4.2 [skip ci] ## [2.4.2](https://github.com/yandex-cloud/nodejs-sdk/compare/v2.4.1...v2.4.2) (2023-03-09) ### Bug Fixes * allow to override endpoint for OperationService ([20091c9](https://github.com/yandex-cloud/nodejs-sdk/commit/20091c95992761c73ebb6b8716c360d269f20b03)) --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index f2501683..fcf64e8b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.1", + "version": "2.4.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.1", + "version": "2.4.2", "license": "MIT", "dependencies": { "@grpc/grpc-js": "^1.6.12", diff --git a/package.json b/package.json index 80df07ef..b3984eb7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@yandex-cloud/nodejs-sdk", - "version": "2.4.1", + "version": "2.4.2", "description": "Yandex.Cloud NodeJS SDK", "keywords": [ "yandex-cloud", From d9b00ebc475b915394bbbe1e18077469e3f862ea Mon Sep 17 00:00:00 2001 From: Ivan Zuev Date: Mon, 13 Mar 2023 18:48:53 +0300 Subject: [PATCH 54/54] feat!: use standalone class for resolving service endpoints BREAKING CHANGE: Session.client doesn't accept custom endpoint anymore --- 
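Migration sketch for the breaking change above: endpoint overrides move from `Session.client` into a `ServiceEndpointResolver` passed to the `Session` constructor. The example assumes the exports added by this patch (`ServiceEndpointResolver`, `DEFAULT_SERVICE_ENDPOINTS_MAP`); the private endpoint host and the token value are placeholders.

```ts
import {
    DEFAULT_SERVICE_ENDPOINTS_MAP,
    ServiceEndpointResolver,
    Session,
    serviceClients,
} from '@yandex-cloud/nodejs-sdk';

// Start from the default endpoint map and override only the "operations" group.
const resolver = new ServiceEndpointResolver({
    ...DEFAULT_SERVICE_ENDPOINTS_MAP,
    operations: [
        {
            serviceIds: ['yandex.cloud.operation.OperationService'],
            endpoint: 'operation.internal.example.net:443', // placeholder endpoint
        },
    ],
});

// Session.client no longer takes a custom endpoint; the resolver supplies it instead.
const session = new Session({ iamToken: process.env.YC_IAM_TOKEN ?? '' }, resolver);
const operationClient = session.client(serviceClients.OperationServiceClient);
```

Every client created from this session, including the one used internally by `waitForOperation`, resolves its address through the same resolver.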
src/index.ts | 1 + src/service-endpoints.test.ts | 10 +- src/service-endpoints.ts | 821 ++++++++++++++----------- src/session.ts | 20 +- src/token-service/iam-token-service.ts | 9 +- src/utils/operation/wait-for.ts | 3 +- 6 files changed, 486 insertions(+), 378 deletions(-) diff --git a/src/index.ts b/src/index.ts index 0bd23339..983a8e64 100644 --- a/src/index.ts +++ b/src/index.ts @@ -4,3 +4,4 @@ export * from './session'; export * from './utils/operation'; export * from './utils/decode-message'; export { WrappedServiceClientType } from './types'; +export * from './service-endpoints'; diff --git a/src/service-endpoints.test.ts b/src/service-endpoints.test.ts index e40b369a..77399840 100644 --- a/src/service-endpoints.test.ts +++ b/src/service-endpoints.test.ts @@ -1,16 +1,18 @@ -import { getServiceClientEndpoint } from './service-endpoints'; +import { ServiceEndpointResolver } from './service-endpoints'; import { serviceClients } from '.'; import { GeneratedServiceClientCtor } from './types'; // eslint-disable-next-line @typescript-eslint/ban-types type MockServiceClientCtor = GeneratedServiceClientCtor<{}>; +const serviceEndpointResolver = new ServiceEndpointResolver(); + describe('service endpoints', () => { it('each service in generated service_clients module should have endpoint declared in service-endpoints', () => { for (const [, ServiceClient] of Object.entries(serviceClients)) { // eslint-disable-next-line @typescript-eslint/no-loop-func expect(() => { - const endpoint = getServiceClientEndpoint(ServiceClient as MockServiceClientCtor); + const endpoint = serviceEndpointResolver.resolve(ServiceClient as MockServiceClientCtor); expect(endpoint).toBeTruthy(); }).not.toThrow(); @@ -21,13 +23,13 @@ describe('service endpoints', () => { const serviceName = 'myCustomService'; expect(() => { - getServiceClientEndpoint({ serviceName } as unknown as MockServiceClientCtor); + serviceEndpointResolver.resolve({ serviceName } as unknown as MockServiceClientCtor); }).toThrow(`Endpoint for service ${serviceName} is no defined`); }); it('should throw exception if client class has no serviceName option', () => { expect(() => { - getServiceClientEndpoint({} as unknown as MockServiceClientCtor); + serviceEndpointResolver.resolve({} as unknown as MockServiceClientCtor); }).toThrow('Unable to retrieve serviceName of provided service client class'); }); }); diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 0c18d07d..8b8523cb 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -1,375 +1,476 @@ import { ServiceClientConstructor, ServiceDefinition } from '@grpc/grpc-js'; +import { flatten } from 'lodash'; import { GeneratedServiceClientCtor } from './types'; -interface ServiceEndpoint { +export interface ServiceEndpoint { serviceIds: string[]; endpoint: string; } -type ServiceEndpointsList = ServiceEndpoint[]; +export type ServiceSlug = + 'operations' | + 'compute' | + 'iam' | + 'resource-manager' | + 'mdb' | + 'dataproc' | + 'vpc' | + 'container-registry' | + 'load-balancer' | + 'serverless' | + 'mdbproxy' | + 'k8s' | + 'logging' | + 'ydb' | + 'iot' | + 'monitoring' | + 'kms' | + 'api-endpoint' | + 'ai' | + 'alb' | + 'billing' | + 'cdn' | + 'certificate-manager' | + 'datasphere' | + 'datatransfer' | + 'dns' | + 'lockbox' | + 'marketplace' | + 'organization-manager' | + 'storage'; + +export type ServiceEndpointsMap = Record; // @see https://api.cloud.yandex.net/endpoints -const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ - { - serviceIds: 
['yandex.cloud.operation.OperationService'], - endpoint: 'operation.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.compute.v1.DiskPlacementGroupService', - 'yandex.cloud.compute.v1.DiskService', - 'yandex.cloud.compute.v1.DiskTypeService', - 'yandex.cloud.compute.v1.FilesystemService', - 'yandex.cloud.compute.v1.HostGroupService', - 'yandex.cloud.compute.v1.HostTypeService', - 'yandex.cloud.compute.v1.ImageService', - 'yandex.cloud.compute.v1.InstanceService', - 'yandex.cloud.compute.v1.PlacementGroupService', - 'yandex.cloud.compute.v1.SnapshotService', - 'yandex.cloud.compute.v1.ZoneService', - 'yandex.cloud.compute.v1.instancegroup.InstanceGroupService', - 'yandex.cloud.compute.v1.SnapshotScheduleService', - ], - endpoint: 'compute.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.iam.v1.ApiKeyService', - 'yandex.cloud.iam.v1.IamTokenService', - 'yandex.cloud.iam.v1.KeyService', - 'yandex.cloud.iam.v1.RoleService', - 'yandex.cloud.iam.v1.ServiceAccountService', - 'yandex.cloud.iam.v1.UserAccountService', - 'yandex.cloud.iam.v1.YandexPassportUserAccountService', - 'yandex.cloud.iam.v1.awscompatibility.AccessKeyService', - ], - endpoint: 'iam.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.resourcemanager.v1.CloudService', - 'yandex.cloud.resourcemanager.v1.FolderService', - ], - endpoint: 'resource-manager.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.mdb.clickhouse.v1.BackupService', - 'yandex.cloud.mdb.clickhouse.v1.ClusterService', - 'yandex.cloud.mdb.clickhouse.v1.DatabaseService', - 'yandex.cloud.mdb.clickhouse.v1.FormatSchemaService', - 'yandex.cloud.mdb.clickhouse.v1.MlModelService', - 'yandex.cloud.mdb.clickhouse.v1.ResourcePresetService', - 'yandex.cloud.mdb.clickhouse.v1.UserService', - 'yandex.cloud.mdb.clickhouse.v1.VersionsService', - 'yandex.cloud.mdb.elasticsearch.v1.AuthService', - 'yandex.cloud.mdb.elasticsearch.v1.ClusterService', - 'yandex.cloud.mdb.elasticsearch.v1.ResourcePresetService', - 'yandex.cloud.mdb.elasticsearch.v1.UserService', - 'yandex.cloud.mdb.elasticsearch.v1.BackupService', - 'yandex.cloud.mdb.elasticsearch.v1.ExtensionService', - 'yandex.cloud.mdb.greenplum.v1.ClusterService', - 'yandex.cloud.mdb.greenplum.v1.BackupService', - 'yandex.cloud.mdb.greenplum.v1.ResourcePresetService', - 'yandex.cloud.mdb.kafka.v1.ClusterService', - 'yandex.cloud.mdb.kafka.v1.ConnectorService', - 'yandex.cloud.mdb.kafka.v1.ResourcePresetService', - 'yandex.cloud.mdb.kafka.v1.TopicService', - 'yandex.cloud.mdb.kafka.v1.UserService', - 'yandex.cloud.mdb.mongodb.v1.BackupService', - 'yandex.cloud.mdb.mongodb.v1.ClusterService', - 'yandex.cloud.mdb.mongodb.v1.DatabaseService', - 'yandex.cloud.mdb.mongodb.v1.ResourcePresetService', - 'yandex.cloud.mdb.mongodb.v1.UserService', - 'yandex.cloud.mdb.mysql.v1.BackupService', - 'yandex.cloud.mdb.mysql.v1.ClusterService', - 'yandex.cloud.mdb.mysql.v1.DatabaseService', - 'yandex.cloud.mdb.mysql.v1.ResourcePresetService', - 'yandex.cloud.mdb.mysql.v1.UserService', - 'yandex.cloud.mdb.postgresql.v1.BackupService', - 'yandex.cloud.mdb.postgresql.v1.ClusterService', - 'yandex.cloud.mdb.postgresql.v1.DatabaseService', - 'yandex.cloud.mdb.postgresql.v1.ResourcePresetService', - 'yandex.cloud.mdb.postgresql.v1.UserService', - 'yandex.cloud.mdb.redis.v1.BackupService', - 'yandex.cloud.mdb.redis.v1.ClusterService', - 'yandex.cloud.mdb.redis.v1.ResourcePresetService', - 'yandex.cloud.mdb.sqlserver.v1.BackupService', - 'yandex.cloud.mdb.sqlserver.v1.ClusterService', 
- 'yandex.cloud.mdb.sqlserver.v1.DatabaseService', - 'yandex.cloud.mdb.sqlserver.v1.ResourcePresetService', - 'yandex.cloud.mdb.sqlserver.v1.UserService', - ], - endpoint: 'mdb.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.dataproc.v1.ClusterService', - 'yandex.cloud.dataproc.v1.JobService', - 'yandex.cloud.dataproc.v1.ResourcePresetService', - 'yandex.cloud.dataproc.v1.SubclusterService', - ], - endpoint: 'dataproc.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.vpc.v1.AddressService', - 'yandex.cloud.vpc.v1.NetworkService', - 'yandex.cloud.vpc.v1.RouteTableService', - 'yandex.cloud.vpc.v1.SecurityGroupService', - 'yandex.cloud.vpc.v1.SubnetService', - 'yandex.cloud.vpc.v1.GatewayService', - ], - endpoint: 'vpc.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.containerregistry.v1.ImageService', - 'yandex.cloud.containerregistry.v1.LifecyclePolicyService', - 'yandex.cloud.containerregistry.v1.RegistryService', - 'yandex.cloud.containerregistry.v1.RepositoryService', - 'yandex.cloud.containerregistry.v1.ScannerService', - ], - endpoint: 'container-registry.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.loadbalancer.v1.NetworkLoadBalancerService', - 'yandex.cloud.loadbalancer.v1.TargetGroupService', - ], - endpoint: 'load-balancer.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.serverless.functions.v1.FunctionService'], - endpoint: 'serverless-functions.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.serverless.triggers.v1.TriggerService'], - endpoint: 'serverless-triggers.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.serverless.apigateway.v1.ApiGatewayService'], - endpoint: 'serverless-apigateway.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.serverless.containers.v1.ContainerService'], - endpoint: 'serverless-containers.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.serverless.mdbproxy.v1.ProxyService'], - endpoint: 'mdbproxy.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService'], - endpoint: 'apigateway-connections.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.k8s.v1.ClusterService', - 'yandex.cloud.k8s.v1.NodeGroupService', - 'yandex.cloud.k8s.v1.VersionService', - ], - endpoint: 'mks.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.logging.v1.LogGroupService', - ], - endpoint: 'logging.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.logging.v1.LogReadingService', - ], - endpoint: 'reader.logging.yandexcloud.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.logging.v1.LogIngestionService', - ], - endpoint: 'ingester.logging.yandexcloud.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.ydb.v1.BackupService', - 'yandex.cloud.ydb.v1.DatabaseService', - 'yandex.cloud.ydb.v1.LocationService', - 'yandex.cloud.ydb.v1.ResourcePresetService', - 'yandex.cloud.ydb.v1.StorageTypeService', - ], - endpoint: 'ydb.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.iot.devices.v1.RegistryService', - ], - endpoint: 'iot-devices.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.iot.devices.v1.RegistryDataService', - 'yandex.cloud.iot.devices.v1.DeviceService', - 'yandex.cloud.iot.devices.v1.DeviceDataService', - ], - endpoint: 'iot-data.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.iot.broker.v1.BrokerDataService', - 
'yandex.cloud.iot.broker.v1.BrokerService', - ], - endpoint: 'iot-broker.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.monitoring.v3.DashboardService', - ], - endpoint: 'monitoring.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.dataproc.manager.v1.JobService', - 'yandex.cloud.dataproc.manager.v1.DataprocManagerService', - ], - endpoint: 'dataproc-manager.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.kms.v1.SymmetricKeyService'], - endpoint: 'kms.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.kms.v1.SymmetricCryptoService'], - endpoint: 'kms.yandex:443', - }, - { - serviceIds: ['yandex.cloud.endpoint.ApiEndpointService'], - endpoint: 'api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.ai.translate.v2.TranslationService'], - endpoint: 'translate.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.ai.vision.v1.VisionService', - 'yandex.cloud.ai.vision.v2.ImageClassifierService', - ], - endpoint: 'vision.api.cloud.yandex.net:443', - }, - { - serviceIds: ['yandex.cloud.ai.stt.v2.SttService', 'speechkit.tts.v3.Synthesizer'], - endpoint: 'transcribe.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.apploadbalancer.v1.BackendGroupService', - 'yandex.cloud.apploadbalancer.v1.HttpRouterService', - 'yandex.cloud.apploadbalancer.v1.LoadBalancerService', - 'yandex.cloud.apploadbalancer.v1.TargetGroupService', - 'yandex.cloud.apploadbalancer.v1.VirtualHostService', - ], - endpoint: 'alb.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.billing.v1.BillingAccountService', - 'yandex.cloud.billing.v1.BudgetService', - 'yandex.cloud.billing.v1.CustomerService', - 'yandex.cloud.billing.v1.ServiceService', - 'yandex.cloud.billing.v1.SkuService', - ], - endpoint: 'billing.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.cdn.v1.CacheService', - 'yandex.cloud.cdn.v1.OriginGroupService', - 'yandex.cloud.cdn.v1.OriginService', - 'yandex.cloud.cdn.v1.ProviderService', - 'yandex.cloud.cdn.v1.ResourceService', - 'yandex.cloud.cdn.v1.RawLogsService', - ], - endpoint: 'cdn.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.certificatemanager.v1.CertificateContentService', - 'yandex.cloud.certificatemanager.v1.CertificateService', - ], - endpoint: 'certificate-manager.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.datasphere.v1.AppTokenService', - 'yandex.cloud.datasphere.v1.FolderBudgetService', - 'yandex.cloud.datasphere.v1.NodeService', - 'yandex.cloud.datasphere.v1.ProjectDataService', - 'yandex.cloud.datasphere.v1.ProjectService', - ], - endpoint: 'datasphere.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.datatransfer.v1.EndpointService', - 'yandex.cloud.datatransfer.v1.TransferService', - ], - endpoint: 'datatransfer.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.dns.v1.DnsZoneService', - ], - endpoint: 'dns.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.lockbox.v1.SecretService', - ], - endpoint: 'lockbox.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.lockbox.v1.PayloadService', - ], - endpoint: 'payload.lockbox.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.marketplace.v1.metering.ImageProductUsageService', - ], - endpoint: 'marketplace.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.organizationmanager.v1.OrganizationService', - 'yandex.cloud.organizationmanager.v1.UserService', - 
'yandex.cloud.organizationmanager.v1.saml.CertificateService', - 'yandex.cloud.organizationmanager.v1.saml.FederationService', - 'yandex.cloud.organizationmanager.v1.GroupService', - ], - endpoint: 'organization-manager.api.cloud.yandex.net:443', - }, - { - serviceIds: [ - 'yandex.cloud.storage.v1.BucketService', - ], - endpoint: 'storage.api.cloud.yandex.net:443', - }, -]; +export const DEFAULT_SERVICE_ENDPOINTS_MAP: ServiceEndpointsMap = { + operations: [ + { + serviceIds: ['yandex.cloud.operation.OperationService'], + endpoint: 'operation.api.cloud.yandex.net:443', + }, + ], + compute: [ + { + serviceIds: [ + 'yandex.cloud.compute.v1.DiskPlacementGroupService', + 'yandex.cloud.compute.v1.DiskService', + 'yandex.cloud.compute.v1.DiskTypeService', + 'yandex.cloud.compute.v1.FilesystemService', + 'yandex.cloud.compute.v1.HostGroupService', + 'yandex.cloud.compute.v1.HostTypeService', + 'yandex.cloud.compute.v1.ImageService', + 'yandex.cloud.compute.v1.InstanceService', + 'yandex.cloud.compute.v1.PlacementGroupService', + 'yandex.cloud.compute.v1.SnapshotService', + 'yandex.cloud.compute.v1.ZoneService', + 'yandex.cloud.compute.v1.instancegroup.InstanceGroupService', + 'yandex.cloud.compute.v1.SnapshotScheduleService', + ], + endpoint: 'compute.api.cloud.yandex.net:443', + }, + ], + iam: [ + { + serviceIds: [ + 'yandex.cloud.iam.v1.ApiKeyService', + 'yandex.cloud.iam.v1.IamTokenService', + 'yandex.cloud.iam.v1.KeyService', + 'yandex.cloud.iam.v1.RoleService', + 'yandex.cloud.iam.v1.ServiceAccountService', + 'yandex.cloud.iam.v1.UserAccountService', + 'yandex.cloud.iam.v1.YandexPassportUserAccountService', + 'yandex.cloud.iam.v1.awscompatibility.AccessKeyService', + ], + endpoint: 'iam.api.cloud.yandex.net:443', + }, + ], + 'resource-manager': [ + { + serviceIds: [ + 'yandex.cloud.resourcemanager.v1.CloudService', + 'yandex.cloud.resourcemanager.v1.FolderService', + ], + endpoint: 'resource-manager.api.cloud.yandex.net:443', + }, + ], + mdb: [ + { + serviceIds: [ + 'yandex.cloud.mdb.clickhouse.v1.BackupService', + 'yandex.cloud.mdb.clickhouse.v1.ClusterService', + 'yandex.cloud.mdb.clickhouse.v1.DatabaseService', + 'yandex.cloud.mdb.clickhouse.v1.FormatSchemaService', + 'yandex.cloud.mdb.clickhouse.v1.MlModelService', + 'yandex.cloud.mdb.clickhouse.v1.ResourcePresetService', + 'yandex.cloud.mdb.clickhouse.v1.UserService', + 'yandex.cloud.mdb.clickhouse.v1.VersionsService', + 'yandex.cloud.mdb.elasticsearch.v1.AuthService', + 'yandex.cloud.mdb.elasticsearch.v1.ClusterService', + 'yandex.cloud.mdb.elasticsearch.v1.ResourcePresetService', + 'yandex.cloud.mdb.elasticsearch.v1.UserService', + 'yandex.cloud.mdb.elasticsearch.v1.BackupService', + 'yandex.cloud.mdb.elasticsearch.v1.ExtensionService', + 'yandex.cloud.mdb.greenplum.v1.ClusterService', + 'yandex.cloud.mdb.greenplum.v1.BackupService', + 'yandex.cloud.mdb.greenplum.v1.ResourcePresetService', + 'yandex.cloud.mdb.kafka.v1.ClusterService', + 'yandex.cloud.mdb.kafka.v1.ConnectorService', + 'yandex.cloud.mdb.kafka.v1.ResourcePresetService', + 'yandex.cloud.mdb.kafka.v1.TopicService', + 'yandex.cloud.mdb.kafka.v1.UserService', + 'yandex.cloud.mdb.mongodb.v1.BackupService', + 'yandex.cloud.mdb.mongodb.v1.ClusterService', + 'yandex.cloud.mdb.mongodb.v1.DatabaseService', + 'yandex.cloud.mdb.mongodb.v1.ResourcePresetService', + 'yandex.cloud.mdb.mongodb.v1.UserService', + 'yandex.cloud.mdb.mysql.v1.BackupService', + 'yandex.cloud.mdb.mysql.v1.ClusterService', + 'yandex.cloud.mdb.mysql.v1.DatabaseService', + 
'yandex.cloud.mdb.mysql.v1.ResourcePresetService', + 'yandex.cloud.mdb.mysql.v1.UserService', + 'yandex.cloud.mdb.postgresql.v1.BackupService', + 'yandex.cloud.mdb.postgresql.v1.ClusterService', + 'yandex.cloud.mdb.postgresql.v1.DatabaseService', + 'yandex.cloud.mdb.postgresql.v1.ResourcePresetService', + 'yandex.cloud.mdb.postgresql.v1.UserService', + 'yandex.cloud.mdb.redis.v1.BackupService', + 'yandex.cloud.mdb.redis.v1.ClusterService', + 'yandex.cloud.mdb.redis.v1.ResourcePresetService', + 'yandex.cloud.mdb.sqlserver.v1.BackupService', + 'yandex.cloud.mdb.sqlserver.v1.ClusterService', + 'yandex.cloud.mdb.sqlserver.v1.DatabaseService', + 'yandex.cloud.mdb.sqlserver.v1.ResourcePresetService', + 'yandex.cloud.mdb.sqlserver.v1.UserService', + ], + endpoint: 'mdb.api.cloud.yandex.net:443', + }, + ], + dataproc: [ + { + serviceIds: [ + 'yandex.cloud.dataproc.v1.ClusterService', + 'yandex.cloud.dataproc.v1.JobService', + 'yandex.cloud.dataproc.v1.ResourcePresetService', + 'yandex.cloud.dataproc.v1.SubclusterService', + ], + endpoint: 'dataproc.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.dataproc.manager.v1.JobService', + 'yandex.cloud.dataproc.manager.v1.DataprocManagerService', + ], + endpoint: 'dataproc-manager.api.cloud.yandex.net:443', + }, + ], + vpc: [ + { + serviceIds: [ + 'yandex.cloud.vpc.v1.AddressService', + 'yandex.cloud.vpc.v1.NetworkService', + 'yandex.cloud.vpc.v1.RouteTableService', + 'yandex.cloud.vpc.v1.SecurityGroupService', + 'yandex.cloud.vpc.v1.SubnetService', + 'yandex.cloud.vpc.v1.GatewayService', + ], + endpoint: 'vpc.api.cloud.yandex.net:443', + }, + ], + 'container-registry': [ + { + serviceIds: [ + 'yandex.cloud.containerregistry.v1.ImageService', + 'yandex.cloud.containerregistry.v1.LifecyclePolicyService', + 'yandex.cloud.containerregistry.v1.RegistryService', + 'yandex.cloud.containerregistry.v1.RepositoryService', + 'yandex.cloud.containerregistry.v1.ScannerService', + ], + endpoint: 'container-registry.api.cloud.yandex.net:443', + }, + ], + 'load-balancer': [ + { + serviceIds: [ + 'yandex.cloud.loadbalancer.v1.NetworkLoadBalancerService', + 'yandex.cloud.loadbalancer.v1.TargetGroupService', + ], + endpoint: 'load-balancer.api.cloud.yandex.net:443', + }, + ], + serverless: [ + { + serviceIds: ['yandex.cloud.serverless.functions.v1.FunctionService'], + endpoint: 'serverless-functions.api.cloud.yandex.net:443', + }, + { + serviceIds: ['yandex.cloud.serverless.triggers.v1.TriggerService'], + endpoint: 'serverless-triggers.api.cloud.yandex.net:443', + }, + { + serviceIds: ['yandex.cloud.serverless.apigateway.v1.ApiGatewayService'], + endpoint: 'serverless-apigateway.api.cloud.yandex.net:443', + }, + { + serviceIds: ['yandex.cloud.serverless.containers.v1.ContainerService'], + endpoint: 'serverless-containers.api.cloud.yandex.net:443', + }, + { + serviceIds: ['yandex.cloud.serverless.apigateway.websocket.v1.ConnectionService'], + endpoint: 'apigateway-connections.api.cloud.yandex.net:443', + }, + ], + mdbproxy: [ + { + serviceIds: ['yandex.cloud.serverless.mdbproxy.v1.ProxyService'], + endpoint: 'mdbproxy.api.cloud.yandex.net:443', + }, + ], + k8s: [ + { + serviceIds: [ + 'yandex.cloud.k8s.v1.ClusterService', + 'yandex.cloud.k8s.v1.NodeGroupService', + 'yandex.cloud.k8s.v1.VersionService', + ], + endpoint: 'mks.api.cloud.yandex.net:443', + }, + ], + logging: [ + { + serviceIds: [ + 'yandex.cloud.logging.v1.LogGroupService', + ], + endpoint: 'logging.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 
'yandex.cloud.logging.v1.LogReadingService', + ], + endpoint: 'reader.logging.yandexcloud.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.logging.v1.LogIngestionService', + ], + endpoint: 'ingester.logging.yandexcloud.net:443', + }, + ], + ydb: [ + { + serviceIds: [ + 'yandex.cloud.ydb.v1.BackupService', + 'yandex.cloud.ydb.v1.DatabaseService', + 'yandex.cloud.ydb.v1.LocationService', + 'yandex.cloud.ydb.v1.ResourcePresetService', + 'yandex.cloud.ydb.v1.StorageTypeService', + ], + endpoint: 'ydb.api.cloud.yandex.net:443', + }, + ], + iot: [ + { + serviceIds: [ + 'yandex.cloud.iot.devices.v1.RegistryService', + ], + endpoint: 'iot-devices.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.iot.devices.v1.RegistryDataService', + 'yandex.cloud.iot.devices.v1.DeviceService', + 'yandex.cloud.iot.devices.v1.DeviceDataService', + ], + endpoint: 'iot-data.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.iot.broker.v1.BrokerDataService', + 'yandex.cloud.iot.broker.v1.BrokerService', + ], + endpoint: 'iot-broker.api.cloud.yandex.net:443', + }, + ], + monitoring: [ + { + serviceIds: [ + 'yandex.cloud.monitoring.v3.DashboardService', + ], + endpoint: 'monitoring.api.cloud.yandex.net:443', + }, + ], + kms: [ + { + serviceIds: ['yandex.cloud.kms.v1.SymmetricKeyService'], + endpoint: 'kms.api.cloud.yandex.net:443', + }, + { + serviceIds: ['yandex.cloud.kms.v1.SymmetricCryptoService'], + endpoint: 'kms.yandex:443', + }, + ], + 'api-endpoint': [ + { + serviceIds: ['yandex.cloud.endpoint.ApiEndpointService'], + endpoint: 'api.cloud.yandex.net:443', + }, + ], + ai: [ + { + serviceIds: ['yandex.cloud.ai.translate.v2.TranslationService'], + endpoint: 'translate.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.ai.vision.v1.VisionService', + 'yandex.cloud.ai.vision.v2.ImageClassifierService', + ], + endpoint: 'vision.api.cloud.yandex.net:443', + }, + { + serviceIds: ['yandex.cloud.ai.stt.v2.SttService', 'speechkit.tts.v3.Synthesizer'], + endpoint: 'transcribe.api.cloud.yandex.net:443', + }, + ], + alb: [ + { + serviceIds: [ + 'yandex.cloud.apploadbalancer.v1.BackendGroupService', + 'yandex.cloud.apploadbalancer.v1.HttpRouterService', + 'yandex.cloud.apploadbalancer.v1.LoadBalancerService', + 'yandex.cloud.apploadbalancer.v1.TargetGroupService', + 'yandex.cloud.apploadbalancer.v1.VirtualHostService', + ], + endpoint: 'alb.api.cloud.yandex.net:443', + }, + ], + billing: [ + { + serviceIds: [ + 'yandex.cloud.billing.v1.BillingAccountService', + 'yandex.cloud.billing.v1.BudgetService', + 'yandex.cloud.billing.v1.CustomerService', + 'yandex.cloud.billing.v1.ServiceService', + 'yandex.cloud.billing.v1.SkuService', + ], + endpoint: 'billing.api.cloud.yandex.net:443', + }, + ], + cdn: [ + { + serviceIds: [ + 'yandex.cloud.cdn.v1.CacheService', + 'yandex.cloud.cdn.v1.OriginGroupService', + 'yandex.cloud.cdn.v1.OriginService', + 'yandex.cloud.cdn.v1.ProviderService', + 'yandex.cloud.cdn.v1.ResourceService', + 'yandex.cloud.cdn.v1.RawLogsService', + ], + endpoint: 'cdn.api.cloud.yandex.net:443', + }, + ], + 'certificate-manager': [ + { + serviceIds: [ + 'yandex.cloud.certificatemanager.v1.CertificateContentService', + 'yandex.cloud.certificatemanager.v1.CertificateService', + ], + endpoint: 'certificate-manager.api.cloud.yandex.net:443', + }, + ], + datasphere: [ + { + serviceIds: [ + 'yandex.cloud.datasphere.v1.AppTokenService', + 'yandex.cloud.datasphere.v1.FolderBudgetService', + 'yandex.cloud.datasphere.v1.NodeService', + 
'yandex.cloud.datasphere.v1.ProjectDataService', + 'yandex.cloud.datasphere.v1.ProjectService', + ], + endpoint: 'datasphere.api.cloud.yandex.net:443', + }, + ], + datatransfer: [ + { + serviceIds: [ + 'yandex.cloud.datatransfer.v1.EndpointService', + 'yandex.cloud.datatransfer.v1.TransferService', + ], + endpoint: 'datatransfer.api.cloud.yandex.net:443', + }, + ], + dns: [ + { + serviceIds: [ + 'yandex.cloud.dns.v1.DnsZoneService', + ], + endpoint: 'dns.api.cloud.yandex.net:443', + }, + ], + lockbox: [ + { + serviceIds: [ + 'yandex.cloud.lockbox.v1.SecretService', + ], + endpoint: 'lockbox.api.cloud.yandex.net:443', + }, + { + serviceIds: [ + 'yandex.cloud.lockbox.v1.PayloadService', + ], + endpoint: 'payload.lockbox.api.cloud.yandex.net:443', + }, + ], + marketplace: [ + { + serviceIds: [ + 'yandex.cloud.marketplace.v1.metering.ImageProductUsageService', + ], + endpoint: 'marketplace.api.cloud.yandex.net:443', + }, + ], + 'organization-manager': [ + { + serviceIds: [ + 'yandex.cloud.organizationmanager.v1.OrganizationService', + 'yandex.cloud.organizationmanager.v1.UserService', + 'yandex.cloud.organizationmanager.v1.saml.CertificateService', + 'yandex.cloud.organizationmanager.v1.saml.FederationService', + 'yandex.cloud.organizationmanager.v1.GroupService', + ], + endpoint: 'organization-manager.api.cloud.yandex.net:443', + }, + ], + storage: [ + { + serviceIds: [ + 'yandex.cloud.storage.v1.BucketService', + ], + endpoint: 'storage.api.cloud.yandex.net:443', + }, + ], +}; -export const getServiceClientEndpoint = (generatedClientCtor: GeneratedServiceClientCtor): string => { - const clientCtor = generatedClientCtor as unknown as ServiceClientConstructor; - const serviceName: string = clientCtor.serviceName as string; +export class ServiceEndpointResolver { + private serviceEndpointsMap: ServiceEndpoint[]; - if (!serviceName) { - throw new Error('Unable to retrieve serviceName of provided service client class'); + constructor(serviceEndpointsMap: ServiceEndpointsMap = DEFAULT_SERVICE_ENDPOINTS_MAP) { + this.serviceEndpointsMap = flatten(Object.values(serviceEndpointsMap)); } - const endpointItem = SERVICE_ENDPOINTS_LIST.find((item) => item.serviceIds.includes(serviceName)); + resolve(generatedClientCtor: GeneratedServiceClientCtor): string { + const clientCtor = generatedClientCtor as unknown as ServiceClientConstructor; + const { serviceName } = clientCtor; - if (!endpointItem) { - throw new Error(`Endpoint for service ${serviceName} is no defined`); - } + if (!serviceName) { + throw new Error('Unable to retrieve serviceName of provided service client class'); + } - return endpointItem.endpoint; -}; + const endpointItem = this.serviceEndpointsMap.find((item) => item.serviceIds.includes(serviceName)); + + if (!endpointItem) { + throw new Error(`Endpoint for service ${serviceName} is no defined`); + } + + return endpointItem.endpoint; + } +} diff --git a/src/session.ts b/src/session.ts index 4dd758e4..8b1b6e54 100644 --- a/src/session.ts +++ b/src/session.ts @@ -14,7 +14,7 @@ import { IamTokenService } from './token-service/iam-token-service'; import { MetadataTokenService } from './token-service/metadata-token-service'; import { clientFactory } from './utils/client-factory'; import { serviceClients, cloudApi } from '.'; -import { getServiceClientEndpoint } from './service-endpoints'; +import { ServiceEndpointResolver } from './service-endpoints'; const isOAuth = (config: SessionConfig): config is OAuthCredentialsConfig => 'oauthToken' in config; @@ -30,10 +30,10 @@ const createIamToken = 
async (iamEndpoint: string, req: Partial Promise => { +const newTokenCreator = (config: SessionConfig, serviceEndpointResolver: ServiceEndpointResolver): () => Promise => { if (isOAuth(config)) { return () => { - const iamEndpoint = getServiceClientEndpoint(serviceClients.IamTokenServiceClient); + const iamEndpoint = serviceEndpointResolver.resolve(serviceClients.IamTokenServiceClient); return createIamToken(iamEndpoint, { yandexPassportOauthToken: config.oauthToken, @@ -45,7 +45,9 @@ const newTokenCreator = (config: SessionConfig): () => Promise => { return async () => iamToken; } - const tokenService = isServiceAccount(config) ? new IamTokenService(config.serviceAccountJson) : new MetadataTokenService(); + const tokenService = isServiceAccount(config) + ? new IamTokenService(serviceEndpointResolver, config.serviceAccountJson) + : new MetadataTokenService(); return async () => tokenService.getToken(); }; @@ -76,17 +78,19 @@ export class Session { private readonly config: Required; private readonly channelCredentials: ChannelCredentials; private readonly tokenCreator: TokenCreator; + private readonly serviceEndpointResolver: ServiceEndpointResolver; private static readonly DEFAULT_CONFIG = { pollInterval: 1000, }; - constructor(config?: SessionConfig) { + constructor(config?: SessionConfig, customServiceEndpointResolver?: ServiceEndpointResolver) { this.config = { ...Session.DEFAULT_CONFIG, ...config, }; - this.tokenCreator = newTokenCreator(this.config); + this.serviceEndpointResolver = customServiceEndpointResolver || new ServiceEndpointResolver(); + this.tokenCreator = newTokenCreator(this.config, this.serviceEndpointResolver); this.channelCredentials = newChannelCredentials(this.tokenCreator, this.config.ssl); } @@ -94,8 +98,8 @@ export class Session { return this.config.pollInterval; } - client(clientClass: GeneratedServiceClientCtor, customEndpoint?: string): WrappedServiceClientType { - const endpoint = customEndpoint || getServiceClientEndpoint(clientClass); + client(clientClass: GeneratedServiceClientCtor): WrappedServiceClientType { + const endpoint = this.serviceEndpointResolver.resolve(clientClass); const channel = createChannel(endpoint, this.channelCredentials); return clientFactory.create(clientClass.service, channel); diff --git a/src/token-service/iam-token-service.ts b/src/token-service/iam-token-service.ts index 06edef5d..e2682578 100644 --- a/src/token-service/iam-token-service.ts +++ b/src/token-service/iam-token-service.ts @@ -3,7 +3,7 @@ import * as jwt from 'jsonwebtoken'; import { DateTime } from 'luxon'; import { createChannel } from 'nice-grpc'; import { cloudApi, serviceClients } from '..'; -import { getServiceClientEndpoint } from '../service-endpoints'; +import { ServiceEndpointResolver } from '../service-endpoints'; import { IIAmCredentials, ISslCredentials, TokenService } from '../types'; import { clientFactory } from '../utils/client-factory'; @@ -13,17 +13,18 @@ export class IamTokenService implements TokenService { public readonly sslCredentials?: ISslCredentials; private readonly iamCredentials: IIAmCredentials; + private readonly serviceEndpointResolver: ServiceEndpointResolver; private jwtExpirationTimeout = 3600 * 1000; private tokenExpirationTimeout = 120 * 1000; private tokenRequestTimeout = 10 * 1000; private token = ''; private tokenTimestamp: DateTime | null; - constructor(iamCredentials: IIAmCredentials, sslCredentials?: ISslCredentials) { + constructor(serviceEndpointResolver: ServiceEndpointResolver, iamCredentials: IIAmCredentials, 
sslCredentials?: ISslCredentials) { this.iamCredentials = iamCredentials; this.tokenTimestamp = null; - this.sslCredentials = sslCredentials; + this.serviceEndpointResolver = serviceEndpointResolver; } private get expired() { @@ -42,7 +43,7 @@ export class IamTokenService implements TokenService { } private client() { - const endpoint = getServiceClientEndpoint(IamTokenServiceClient); + const endpoint = this.serviceEndpointResolver.resolve(IamTokenServiceClient); const channel = createChannel(endpoint, credentials.createSsl()); return clientFactory.create(IamTokenServiceClient.service, channel); diff --git a/src/utils/operation/wait-for.ts b/src/utils/operation/wait-for.ts index fef09752..d7db9840 100644 --- a/src/utils/operation/wait-for.ts +++ b/src/utils/operation/wait-for.ts @@ -13,9 +13,8 @@ export const waitForOperation = ( op: Operation, session: Session, timeoutMs: number = DEFAULT_TIMEOUT_MS, - operationServiceEndpoint?: string, ): Promise => { - const client = session.client(serviceClients.OperationServiceClient, operationServiceEndpoint); + const client = session.client(serviceClients.OperationServiceClient); const maxChecksCount = Math.ceil(timeoutMs / session.pollInterval); let checksCount = 0;