diff --git a/cloudapi b/cloudapi index 83dafd6b..3cd72656 160000 --- a/cloudapi +++ b/cloudapi @@ -1 +1 @@ -Subproject commit 83dafd6b465cfa321dfd492f840ed480f7269493 +Subproject commit 3cd726562074d55546973cb075bc03754fdedc99 diff --git a/scripts/services.ts b/scripts/services.ts index 5b8faca6..a0fa8594 100644 --- a/scripts/services.ts +++ b/scripts/services.ts @@ -7,6 +7,7 @@ export const servicesConfig: ServicesConfig = { translate_translation_service: { importClassName: 'TranslationServiceClient' }, tts_service: { importClassName: 'SynthesizerClient' }, vision_service: { importClassName: 'VisionServiceClient' }, + vision_image_classifier_service: { importClassName: 'ImageClassifierServiceClient' }, }, apploadbalancer: { backend_group_service: { importClassName: 'BackendGroupServiceClient' }, @@ -28,6 +29,7 @@ export const servicesConfig: ServicesConfig = { origin_service: { importClassName: 'OriginServiceClient' }, provider_service: { importClassName: 'ProviderServiceClient' }, resource_service: { importClassName: 'ResourceServiceClient' }, + raw_logs_service: { importClassName: 'RawLogsServiceClient' }, }, certificatemanager: { certificate_content_service: { importClassName: 'CertificateContentServiceClient' }, @@ -128,10 +130,14 @@ export const servicesConfig: ServicesConfig = { clickhouse_user_service: { importClassName: 'UserServiceClient', exportClassName: 'ClickHouseUserServiceClient' }, clickhouse_versions_service: { importClassName: 'VersionsServiceClient' }, elasticsearch_auth_service: { importClassName: 'AuthServiceClient' }, + elasticsearch_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'ElasticBackupServiceClient' }, + elasticsearch_extension_service: { importClassName: 'ExtensionServiceClient', exportClassName: 'ElasticExtensionServiceClient' }, elasticsearch_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'ElasticClusterServiceClient' }, elasticsearch_resource_preset_service: { 
importClassName: 'ResourcePresetServiceClient', exportClassName: 'ElasticResourcePresetServiceClient' }, elasticsearch_user_service: { importClassName: 'UserServiceClient', exportClassName: 'ElasticUserServiceClient' }, greenplum_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'GreenplumClusterServiceClient' }, + greenplum_backup_service: { importClassName: 'BackupServiceClient', exportClassName: 'GreenplumBackupServiceClient' }, + greenplum_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'GreenplumResourcePresetServiceClient' }, kafka_cluster_service: { importClassName: 'ClusterServiceClient', exportClassName: 'KafkaClusterServiceClient' }, kafka_connector_service: { importClassName: 'ConnectorServiceClient' }, kafka_resource_preset_service: { importClassName: 'ResourcePresetServiceClient', exportClassName: 'KafkaResourcePresetServiceClient' }, diff --git a/src/generated/yandex/cloud/access/access.ts b/src/generated/yandex/cloud/access/access.ts index d9026df9..35939c1c 100644 --- a/src/generated/yandex/cloud/access/access.ts +++ b/src/generated/yandex/cloud/access/access.ts @@ -63,7 +63,7 @@ export interface Subject { * Type of the subject. * * It can contain one of the following values: - * * `userAccount`: An account on Yandex or Yandex.Connect, added to Yandex.Cloud. + * * `userAccount`: An account on Yandex or Yandex Connect, added to Yandex Cloud. * * `serviceAccount`: A service account. This type represents the [yandex.cloud.iam.v1.ServiceAccount] resource. * * `federatedUser`: A federated account. This type represents a user from an identity federation, like Active Directory. * * `system`: System group. This type represents several accounts with a common system identifier. 
diff --git a/src/generated/yandex/cloud/ai/index.ts b/src/generated/yandex/cloud/ai/index.ts index ea08c21a..972cfc79 100644 --- a/src/generated/yandex/cloud/ai/index.ts +++ b/src/generated/yandex/cloud/ai/index.ts @@ -8,4 +8,7 @@ export * as vision_face_detection from './vision/v1/face_detection' export * as vision_image_copy_search from './vision/v1/image_copy_search' export * as vision_primitives from './vision/v1/primitives' export * as vision_text_detection from './vision/v1/text_detection' -export * as vision_service from './vision/v1/vision_service' \ No newline at end of file +export * as vision_service from './vision/v1/vision_service' +export * as vision_image from './vision/v2/image' +export * as vision_image_classifier from './vision/v2/image_classifier' +export * as vision_image_classifier_service from './vision/v2/image_classifier_service' diff --git a/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts b/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts index 17545d6a..a53e28b1 100644 --- a/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts +++ b/src/generated/yandex/cloud/ai/stt/v2/stt_service.ts @@ -75,6 +75,8 @@ export interface RecognitionSpec { audioChannelCount: number; /** This mark allows disable normalization text */ rawResults: boolean; + /** Rewrite text in literature style (default: false) */ + literatureText: boolean; } export enum RecognitionSpec_AudioEncoding { @@ -82,6 +84,8 @@ export enum RecognitionSpec_AudioEncoding { /** LINEAR16_PCM - 16-bit signed little-endian (Linear PCM) */ LINEAR16_PCM = 1, OGG_OPUS = 2, + /** MP3 - transcription only */ + MP3 = 3, UNRECOGNIZED = -1, } @@ -98,6 +102,9 @@ export function recognitionSpec_AudioEncodingFromJSON( case 2: case "OGG_OPUS": return RecognitionSpec_AudioEncoding.OGG_OPUS; + case 3: + case "MP3": + return RecognitionSpec_AudioEncoding.MP3; case -1: case "UNRECOGNIZED": default: @@ -115,6 +122,8 @@ export function recognitionSpec_AudioEncodingToJSON( return "LINEAR16_PCM"; case 
RecognitionSpec_AudioEncoding.OGG_OPUS: return "OGG_OPUS"; + case RecognitionSpec_AudioEncoding.MP3: + return "MP3"; default: return "UNKNOWN"; } @@ -686,6 +695,7 @@ const baseRecognitionSpec: object = { singleUtterance: false, audioChannelCount: 0, rawResults: false, + literatureText: false, }; export const RecognitionSpec = { @@ -722,6 +732,9 @@ export const RecognitionSpec = { if (message.rawResults === true) { writer.uint32(80).bool(message.rawResults); } + if (message.literatureText === true) { + writer.uint32(88).bool(message.literatureText); + } return writer; }, @@ -759,6 +772,9 @@ export const RecognitionSpec = { case 10: message.rawResults = reader.bool(); break; + case 11: + message.literatureText = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -806,6 +822,10 @@ export const RecognitionSpec = { object.rawResults !== undefined && object.rawResults !== null ? Boolean(object.rawResults) : false; + message.literatureText = + object.literatureText !== undefined && object.literatureText !== null + ? Boolean(object.literatureText) + : false; return message; }, @@ -829,6 +849,8 @@ export const RecognitionSpec = { message.audioChannelCount !== undefined && (obj.audioChannelCount = Math.round(message.audioChannelCount)); message.rawResults !== undefined && (obj.rawResults = message.rawResults); + message.literatureText !== undefined && + (obj.literatureText = message.literatureText); return obj; }, @@ -845,6 +867,7 @@ export const RecognitionSpec = { message.singleUtterance = object.singleUtterance ?? false; message.audioChannelCount = object.audioChannelCount ?? 0; message.rawResults = object.rawResults ?? false; + message.literatureText = object.literatureText ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt.ts b/src/generated/yandex/cloud/ai/stt/v3/stt.ts new file mode 100644 index 00000000..c941aa89 --- /dev/null +++ b/src/generated/yandex/cloud/ai/stt/v3/stt.ts @@ -0,0 +1,2814 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "speechkit.stt.v3"; + +export enum CodeType { + CODE_TYPE_UNSPECIFIED = 0, + /** WORKING - all good */ + WORKING = 1, + /** WARNING - for example, if speech is sent not in real time. or unknown context (and we've made fallback) */ + WARNING = 2, + /** CLOSED - after session was closed */ + CLOSED = 3, + UNRECOGNIZED = -1, +} + +export function codeTypeFromJSON(object: any): CodeType { + switch (object) { + case 0: + case "CODE_TYPE_UNSPECIFIED": + return CodeType.CODE_TYPE_UNSPECIFIED; + case 1: + case "WORKING": + return CodeType.WORKING; + case 2: + case "WARNING": + return CodeType.WARNING; + case 3: + case "CLOSED": + return CodeType.CLOSED; + case -1: + case "UNRECOGNIZED": + default: + return CodeType.UNRECOGNIZED; + } +} + +export function codeTypeToJSON(object: CodeType): string { + switch (object) { + case CodeType.CODE_TYPE_UNSPECIFIED: + return "CODE_TYPE_UNSPECIFIED"; + case CodeType.WORKING: + return "WORKING"; + case CodeType.WARNING: + return "WARNING"; + case CodeType.CLOSED: + return "CLOSED"; + default: + return "UNKNOWN"; + } +} + +/** options */ +export interface TextNormalizationOptions { + $type: "speechkit.stt.v3.TextNormalizationOptions"; + textNormalization: TextNormalizationOptions_TextNormalization; + /** Filter profanity (default: false) */ + profanityFilter: boolean; + /** Rewrite text in literature style (default: false) */ + literatureText: boolean; +} + +/** Normalization */ +export enum TextNormalizationOptions_TextNormalization { + TEXT_NORMALIZATION_UNSPECIFIED = 0, + /** 
TEXT_NORMALIZATION_ENABLED - Enable normalization */ + TEXT_NORMALIZATION_ENABLED = 1, + /** TEXT_NORMALIZATION_DISABLED - Disable normalization */ + TEXT_NORMALIZATION_DISABLED = 2, + UNRECOGNIZED = -1, +} + +export function textNormalizationOptions_TextNormalizationFromJSON( + object: any +): TextNormalizationOptions_TextNormalization { + switch (object) { + case 0: + case "TEXT_NORMALIZATION_UNSPECIFIED": + return TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_UNSPECIFIED; + case 1: + case "TEXT_NORMALIZATION_ENABLED": + return TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_ENABLED; + case 2: + case "TEXT_NORMALIZATION_DISABLED": + return TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_DISABLED; + case -1: + case "UNRECOGNIZED": + default: + return TextNormalizationOptions_TextNormalization.UNRECOGNIZED; + } +} + +export function textNormalizationOptions_TextNormalizationToJSON( + object: TextNormalizationOptions_TextNormalization +): string { + switch (object) { + case TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_UNSPECIFIED: + return "TEXT_NORMALIZATION_UNSPECIFIED"; + case TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_ENABLED: + return "TEXT_NORMALIZATION_ENABLED"; + case TextNormalizationOptions_TextNormalization.TEXT_NORMALIZATION_DISABLED: + return "TEXT_NORMALIZATION_DISABLED"; + default: + return "UNKNOWN"; + } +} + +export interface DefaultEouClassifier { + $type: "speechkit.stt.v3.DefaultEouClassifier"; + /** EOU sensitivity. Currently two levels, faster with more error and more conservative (our default) */ + type: DefaultEouClassifier_EouSensitivity; + /** hint for max pause between words. 
Our EoU detector could use this information to distinguish between end of utterance and slow speech (like one two three, etc) */ + maxPauseBetweenWordsHintMs: number; +} + +export enum DefaultEouClassifier_EouSensitivity { + EOU_SENSITIVITY_UNSPECIFIED = 0, + DEFAULT = 1, + HIGH = 2, + UNRECOGNIZED = -1, +} + +export function defaultEouClassifier_EouSensitivityFromJSON( + object: any +): DefaultEouClassifier_EouSensitivity { + switch (object) { + case 0: + case "EOU_SENSITIVITY_UNSPECIFIED": + return DefaultEouClassifier_EouSensitivity.EOU_SENSITIVITY_UNSPECIFIED; + case 1: + case "DEFAULT": + return DefaultEouClassifier_EouSensitivity.DEFAULT; + case 2: + case "HIGH": + return DefaultEouClassifier_EouSensitivity.HIGH; + case -1: + case "UNRECOGNIZED": + default: + return DefaultEouClassifier_EouSensitivity.UNRECOGNIZED; + } +} + +export function defaultEouClassifier_EouSensitivityToJSON( + object: DefaultEouClassifier_EouSensitivity +): string { + switch (object) { + case DefaultEouClassifier_EouSensitivity.EOU_SENSITIVITY_UNSPECIFIED: + return "EOU_SENSITIVITY_UNSPECIFIED"; + case DefaultEouClassifier_EouSensitivity.DEFAULT: + return "DEFAULT"; + case DefaultEouClassifier_EouSensitivity.HIGH: + return "HIGH"; + default: + return "UNKNOWN"; + } +} + +/** use EOU provided by user */ +export interface ExternalEouClassifier { + $type: "speechkit.stt.v3.ExternalEouClassifier"; +} + +export interface EouClassifierOptions { + $type: "speechkit.stt.v3.EouClassifierOptions"; + /** EOU classifier provided by SpeechKit. Default */ + defaultClassifier?: DefaultEouClassifier | undefined; + /** EoU is enforced by external messages from user */ + externalClassifier?: ExternalEouClassifier | undefined; +} + +/** RAW Audio format spec (no container to infer type). 
used in AudioFormat options */ +export interface RawAudio { + $type: "speechkit.stt.v3.RawAudio"; + /** type of audio encoding */ + audioEncoding: RawAudio_AudioEncoding; + /** PCM sample rate */ + sampleRateHertz: number; + /** PCM channel count. Currently only single channel audio is supported in real-time recognition */ + audioChannelCount: number; +} + +export enum RawAudio_AudioEncoding { + AUDIO_ENCODING_UNSPECIFIED = 0, + LINEAR16_PCM = 1, + UNRECOGNIZED = -1, +} + +export function rawAudio_AudioEncodingFromJSON( + object: any +): RawAudio_AudioEncoding { + switch (object) { + case 0: + case "AUDIO_ENCODING_UNSPECIFIED": + return RawAudio_AudioEncoding.AUDIO_ENCODING_UNSPECIFIED; + case 1: + case "LINEAR16_PCM": + return RawAudio_AudioEncoding.LINEAR16_PCM; + case -1: + case "UNRECOGNIZED": + default: + return RawAudio_AudioEncoding.UNRECOGNIZED; + } +} + +export function rawAudio_AudioEncodingToJSON( + object: RawAudio_AudioEncoding +): string { + switch (object) { + case RawAudio_AudioEncoding.AUDIO_ENCODING_UNSPECIFIED: + return "AUDIO_ENCODING_UNSPECIFIED"; + case RawAudio_AudioEncoding.LINEAR16_PCM: + return "LINEAR16_PCM"; + default: + return "UNKNOWN"; + } +} + +/** Audio with fixed type in container. 
used in AudioFormat options */ +export interface ContainerAudio { + $type: "speechkit.stt.v3.ContainerAudio"; + /** type of audio container */ + containerAudioType: ContainerAudio_ContainerAudioType; +} + +export enum ContainerAudio_ContainerAudioType { + CONTAINER_AUDIO_TYPE_UNSPECIFIED = 0, + WAV = 1, + OGG_OPUS = 2, + MP3 = 3, + UNRECOGNIZED = -1, +} + +export function containerAudio_ContainerAudioTypeFromJSON( + object: any +): ContainerAudio_ContainerAudioType { + switch (object) { + case 0: + case "CONTAINER_AUDIO_TYPE_UNSPECIFIED": + return ContainerAudio_ContainerAudioType.CONTAINER_AUDIO_TYPE_UNSPECIFIED; + case 1: + case "WAV": + return ContainerAudio_ContainerAudioType.WAV; + case 2: + case "OGG_OPUS": + return ContainerAudio_ContainerAudioType.OGG_OPUS; + case 3: + case "MP3": + return ContainerAudio_ContainerAudioType.MP3; + case -1: + case "UNRECOGNIZED": + default: + return ContainerAudio_ContainerAudioType.UNRECOGNIZED; + } +} + +export function containerAudio_ContainerAudioTypeToJSON( + object: ContainerAudio_ContainerAudioType +): string { + switch (object) { + case ContainerAudio_ContainerAudioType.CONTAINER_AUDIO_TYPE_UNSPECIFIED: + return "CONTAINER_AUDIO_TYPE_UNSPECIFIED"; + case ContainerAudio_ContainerAudioType.WAV: + return "WAV"; + case ContainerAudio_ContainerAudioType.OGG_OPUS: + return "OGG_OPUS"; + case ContainerAudio_ContainerAudioType.MP3: + return "MP3"; + default: + return "UNKNOWN"; + } +} + +/** audio format options */ +export interface AudioFormatOptions { + $type: "speechkit.stt.v3.AudioFormatOptions"; + /** audio without container */ + rawAudio?: RawAudio | undefined; + /** audio is wrapped in container */ + containerAudio?: ContainerAudio | undefined; +} + +export interface LanguageRestrictionOptions { + $type: "speechkit.stt.v3.LanguageRestrictionOptions"; + restrictionType: LanguageRestrictionOptions_LanguageRestrictionType; + languageCode: string[]; +} + +export enum LanguageRestrictionOptions_LanguageRestrictionType { + 
LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED = 0, + WHITELIST = 1, + BLACKLIST = 2, + UNRECOGNIZED = -1, +} + +export function languageRestrictionOptions_LanguageRestrictionTypeFromJSON( + object: any +): LanguageRestrictionOptions_LanguageRestrictionType { + switch (object) { + case 0: + case "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED": + return LanguageRestrictionOptions_LanguageRestrictionType.LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED; + case 1: + case "WHITELIST": + return LanguageRestrictionOptions_LanguageRestrictionType.WHITELIST; + case 2: + case "BLACKLIST": + return LanguageRestrictionOptions_LanguageRestrictionType.BLACKLIST; + case -1: + case "UNRECOGNIZED": + default: + return LanguageRestrictionOptions_LanguageRestrictionType.UNRECOGNIZED; + } +} + +export function languageRestrictionOptions_LanguageRestrictionTypeToJSON( + object: LanguageRestrictionOptions_LanguageRestrictionType +): string { + switch (object) { + case LanguageRestrictionOptions_LanguageRestrictionType.LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED: + return "LANGUAGE_RESTRICTION_TYPE_UNSPECIFIED"; + case LanguageRestrictionOptions_LanguageRestrictionType.WHITELIST: + return "WHITELIST"; + case LanguageRestrictionOptions_LanguageRestrictionType.BLACKLIST: + return "BLACKLIST"; + default: + return "UNKNOWN"; + } +} + +export interface RecognitionModelOptions { + $type: "speechkit.stt.v3.RecognitionModelOptions"; + /** reserved for future, do not use */ + model: string; + /** config for input audio */ + audioFormat?: AudioFormatOptions; + /** text normalization options */ + textNormalization?: TextNormalizationOptions; + /** possible languages in audio */ + languageRestriction?: LanguageRestrictionOptions; + /** how to deal with audio data (in real time, after all data is received, etc). 
Default is REAL_TIME */ + audioProcessingType: RecognitionModelOptions_AudioProcessingType; +} + +export enum RecognitionModelOptions_AudioProcessingType { + AUDIO_PROCESSING_TYPE_UNSPECIFIED = 0, + REAL_TIME = 1, + FULL_DATA = 2, + UNRECOGNIZED = -1, +} + +export function recognitionModelOptions_AudioProcessingTypeFromJSON( + object: any +): RecognitionModelOptions_AudioProcessingType { + switch (object) { + case 0: + case "AUDIO_PROCESSING_TYPE_UNSPECIFIED": + return RecognitionModelOptions_AudioProcessingType.AUDIO_PROCESSING_TYPE_UNSPECIFIED; + case 1: + case "REAL_TIME": + return RecognitionModelOptions_AudioProcessingType.REAL_TIME; + case 2: + case "FULL_DATA": + return RecognitionModelOptions_AudioProcessingType.FULL_DATA; + case -1: + case "UNRECOGNIZED": + default: + return RecognitionModelOptions_AudioProcessingType.UNRECOGNIZED; + } +} + +export function recognitionModelOptions_AudioProcessingTypeToJSON( + object: RecognitionModelOptions_AudioProcessingType +): string { + switch (object) { + case RecognitionModelOptions_AudioProcessingType.AUDIO_PROCESSING_TYPE_UNSPECIFIED: + return "AUDIO_PROCESSING_TYPE_UNSPECIFIED"; + case RecognitionModelOptions_AudioProcessingType.REAL_TIME: + return "REAL_TIME"; + case RecognitionModelOptions_AudioProcessingType.FULL_DATA: + return "FULL_DATA"; + default: + return "UNKNOWN"; + } +} + +export interface StreamingOptions { + $type: "speechkit.stt.v3.StreamingOptions"; + /** configuration for speech recognition model */ + recognitionModel?: RecognitionModelOptions; + /** configuration for end of utterance detection model */ + eouClassifier?: EouClassifierOptions; +} + +/** data chunk with audio */ +export interface AudioChunk { + $type: "speechkit.stt.v3.AudioChunk"; + /** bytes with audio data */ + data: Buffer; +} + +export interface SilenceChunk { + $type: "speechkit.stt.v3.SilenceChunk"; + /** duration of silence chunk in ms */ + durationMs: number; +} + +/** force EOU */ +export interface Eou { + $type: 
"speechkit.stt.v3.Eou"; +} + +/** + * streaming audio request + * Events are control messages from user + * first message should be session options + * the next messages are audio data chunks or control messages + */ +export interface StreamingRequest { + $type: "speechkit.stt.v3.StreamingRequest"; + /** Session options. should be first message from user */ + sessionOptions?: StreamingOptions | undefined; + /** chunk with audio data */ + chunk?: AudioChunk | undefined; + /** chunk with silence */ + silenceChunk?: SilenceChunk | undefined; + /** request to end current utterance. Works only with external EoU detector */ + eou?: Eou | undefined; +} + +/** recognized word */ +export interface Word { + $type: "speechkit.stt.v3.Word"; + /** word text */ + text: string; + /** estimation of word start time in ms */ + startTimeMs: number; + /** estimation of word end time in ms */ + endTimeMs: number; +} + +/** recognition of specific time frame */ +export interface Alternative { + $type: "speechkit.stt.v3.Alternative"; + /** words in time frame */ + words: Word[]; + /** text in time frame */ + text: string; + /** start of time frame */ + startTimeMs: number; + /** end of time frame */ + endTimeMs: number; + /** hypothesis confidence. Currently is not used */ + confidence: number; +} + +/** Update information from */ +export interface EouUpdate { + $type: "speechkit.stt.v3.EouUpdate"; + /** end of utterance estimated time */ + timeMs: number; +} + +/** update of hypothesis */ +export interface AlternativeUpdate { + $type: "speechkit.stt.v3.AlternativeUpdate"; + /** list of hypothesis for timeframes */ + alternatives: Alternative[]; + /** tag for distinguish audio channels. */ + channelTag: string; +} + +/** AudioCursors are state of ASR recognition stream */ +export interface AudioCursors { + $type: "speechkit.stt.v3.AudioCursors"; + /** amount of audio chunks server received. This cursor is moved after each audio chunk was received by server. 
*/ + receivedDataMs: number; + /** input stream reset data */ + resetTimeMs: number; + /** + * how much audio was processed. This time includes trimming silences as well. This cursor is moved after server received enough data + * to update recognition results (includes silence as well) + */ + partialTimeMs: number; + /** + * Time of last final. This cursor is moved when server decides that recognition from start of audio until final_time_ms will not change anymore + * usually this even is followed by EOU detection (but this could change in future) + */ + finalTimeMs: number; + /** This is index of last final server send. Incremented after each new final. */ + finalIndex: number; + /** + * Estimated time of EOU. Cursor is updated after each new EOU is sent + * For external classifier this equals to received_data_ms at the moment EOU event arrives + * For internal classifier this is estimation of time. The time is not exact and has the same guarantees as word timings + */ + eouTimeMs: number; +} + +/** refinement for final hypo. For example, text normalization is refinement. 
*/ +export interface FinalRefinement { + $type: "speechkit.stt.v3.FinalRefinement"; + /** index of final for which server sends additional information */ + finalIndex: number; + /** normalized text instead of raw one */ + normalizedText?: AlternativeUpdate | undefined; +} + +/** status message */ +export interface StatusCode { + $type: "speechkit.stt.v3.StatusCode"; + /** code type */ + codeType: CodeType; + /** human readable message */ + message: string; +} + +/** session identifier */ +export interface SessionUuid { + $type: "speechkit.stt.v3.SessionUuid"; + /** internal session identifier */ + uuid: string; + /** user session identifier */ + userRequestId: string; +} + +/** + * responses from server + * each response contains session uuid + * AudioCursors + * plus specific even + */ +export interface StreamingResponse { + $type: "speechkit.stt.v3.StreamingResponse"; + /** session identifier */ + sessionUuid?: SessionUuid; + /** progress bar for stream session recognition: how many data we obtained; final and partial times; etc */ + audioCursors?: AudioCursors; + /** wall clock on server side. This is time when server wrote results to stream */ + responseWallTimeMs: number; + /** + * partial results, server will send them regularly after enough audio data was received from user. This are current text estimation + * from final_time_ms to partial_time_ms. Could change after new data will arrive + */ + partial?: AlternativeUpdate | undefined; + /** final results, the recognition is now fixed until final_time_ms. For now, final is sent only if the EOU event was triggered. This could be change in future releases */ + final?: AlternativeUpdate | undefined; + /** + * After EOU classifier, send the message with final, send the EouUpdate with time of EOU + * before eou_update we send final with the same time. 
there could be several finals before eou update + */ + eouUpdate?: EouUpdate | undefined; + /** + * For each final, if normalization is enabled, sent the normalized text (or some other advanced post-processing). + * Final normalization will introduce additional latency + */ + finalRefinement?: FinalRefinement | undefined; + /** Status messages, send by server with fixed interval (keep-alive) */ + statusCode?: StatusCode | undefined; +} + +const baseTextNormalizationOptions: object = { + $type: "speechkit.stt.v3.TextNormalizationOptions", + textNormalization: 0, + profanityFilter: false, + literatureText: false, +}; + +export const TextNormalizationOptions = { + $type: "speechkit.stt.v3.TextNormalizationOptions" as const, + + encode( + message: TextNormalizationOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.textNormalization !== 0) { + writer.uint32(8).int32(message.textNormalization); + } + if (message.profanityFilter === true) { + writer.uint32(16).bool(message.profanityFilter); + } + if (message.literatureText === true) { + writer.uint32(24).bool(message.literatureText); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): TextNormalizationOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseTextNormalizationOptions, + } as TextNormalizationOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.textNormalization = reader.int32() as any; + break; + case 2: + message.profanityFilter = reader.bool(); + break; + case 3: + message.literatureText = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): TextNormalizationOptions { + const message = { + ...baseTextNormalizationOptions, + } as TextNormalizationOptions; + message.textNormalization = + object.textNormalization !== undefined && + object.textNormalization !== null + ? textNormalizationOptions_TextNormalizationFromJSON( + object.textNormalization + ) + : 0; + message.profanityFilter = + object.profanityFilter !== undefined && object.profanityFilter !== null + ? Boolean(object.profanityFilter) + : false; + message.literatureText = + object.literatureText !== undefined && object.literatureText !== null + ? Boolean(object.literatureText) + : false; + return message; + }, + + toJSON(message: TextNormalizationOptions): unknown { + const obj: any = {}; + message.textNormalization !== undefined && + (obj.textNormalization = textNormalizationOptions_TextNormalizationToJSON( + message.textNormalization + )); + message.profanityFilter !== undefined && + (obj.profanityFilter = message.profanityFilter); + message.literatureText !== undefined && + (obj.literatureText = message.literatureText); + return obj; + }, + + fromPartial, I>>( + object: I + ): TextNormalizationOptions { + const message = { + ...baseTextNormalizationOptions, + } as TextNormalizationOptions; + message.textNormalization = object.textNormalization ?? 0; + message.profanityFilter = object.profanityFilter ?? false; + message.literatureText = object.literatureText ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set( + TextNormalizationOptions.$type, + TextNormalizationOptions +); + +const baseDefaultEouClassifier: object = { + $type: "speechkit.stt.v3.DefaultEouClassifier", + type: 0, + maxPauseBetweenWordsHintMs: 0, +}; + +export const DefaultEouClassifier = { + $type: "speechkit.stt.v3.DefaultEouClassifier" as const, + + encode( + message: DefaultEouClassifier, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.type !== 0) { + writer.uint32(8).int32(message.type); + } + if (message.maxPauseBetweenWordsHintMs !== 0) { + writer.uint32(16).int64(message.maxPauseBetweenWordsHintMs); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DefaultEouClassifier { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDefaultEouClassifier } as DefaultEouClassifier; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32() as any; + break; + case 2: + message.maxPauseBetweenWordsHintMs = longToNumber( + reader.int64() as Long + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DefaultEouClassifier { + const message = { ...baseDefaultEouClassifier } as DefaultEouClassifier; + message.type = + object.type !== undefined && object.type !== null + ? defaultEouClassifier_EouSensitivityFromJSON(object.type) + : 0; + message.maxPauseBetweenWordsHintMs = + object.maxPauseBetweenWordsHintMs !== undefined && + object.maxPauseBetweenWordsHintMs !== null + ? 
Number(object.maxPauseBetweenWordsHintMs) + : 0; + return message; + }, + + toJSON(message: DefaultEouClassifier): unknown { + const obj: any = {}; + message.type !== undefined && + (obj.type = defaultEouClassifier_EouSensitivityToJSON(message.type)); + message.maxPauseBetweenWordsHintMs !== undefined && + (obj.maxPauseBetweenWordsHintMs = Math.round( + message.maxPauseBetweenWordsHintMs + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): DefaultEouClassifier { + const message = { ...baseDefaultEouClassifier } as DefaultEouClassifier; + message.type = object.type ?? 0; + message.maxPauseBetweenWordsHintMs = object.maxPauseBetweenWordsHintMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(DefaultEouClassifier.$type, DefaultEouClassifier); + +const baseExternalEouClassifier: object = { + $type: "speechkit.stt.v3.ExternalEouClassifier", +}; + +export const ExternalEouClassifier = { + $type: "speechkit.stt.v3.ExternalEouClassifier" as const, + + encode( + _: ExternalEouClassifier, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ExternalEouClassifier { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseExternalEouClassifier } as ExternalEouClassifier; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): ExternalEouClassifier { + const message = { ...baseExternalEouClassifier } as ExternalEouClassifier; + return message; + }, + + toJSON(_: ExternalEouClassifier): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): ExternalEouClassifier { + const message = { ...baseExternalEouClassifier } as ExternalEouClassifier; + return message; + }, +}; + +messageTypeRegistry.set(ExternalEouClassifier.$type, ExternalEouClassifier); + +const baseEouClassifierOptions: object = { + $type: "speechkit.stt.v3.EouClassifierOptions", +}; + +export const EouClassifierOptions = { + $type: "speechkit.stt.v3.EouClassifierOptions" as const, + + encode( + message: EouClassifierOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.defaultClassifier !== undefined) { + DefaultEouClassifier.encode( + message.defaultClassifier, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.externalClassifier !== undefined) { + ExternalEouClassifier.encode( + message.externalClassifier, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): EouClassifierOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEouClassifierOptions } as EouClassifierOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultClassifier = DefaultEouClassifier.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.externalClassifier = ExternalEouClassifier.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EouClassifierOptions { + const message = { ...baseEouClassifierOptions } as EouClassifierOptions; + message.defaultClassifier = + object.defaultClassifier !== undefined && + object.defaultClassifier !== null + ? DefaultEouClassifier.fromJSON(object.defaultClassifier) + : undefined; + message.externalClassifier = + object.externalClassifier !== undefined && + object.externalClassifier !== null + ? ExternalEouClassifier.fromJSON(object.externalClassifier) + : undefined; + return message; + }, + + toJSON(message: EouClassifierOptions): unknown { + const obj: any = {}; + message.defaultClassifier !== undefined && + (obj.defaultClassifier = message.defaultClassifier + ? DefaultEouClassifier.toJSON(message.defaultClassifier) + : undefined); + message.externalClassifier !== undefined && + (obj.externalClassifier = message.externalClassifier + ? ExternalEouClassifier.toJSON(message.externalClassifier) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): EouClassifierOptions { + const message = { ...baseEouClassifierOptions } as EouClassifierOptions; + message.defaultClassifier = + object.defaultClassifier !== undefined && + object.defaultClassifier !== null + ? DefaultEouClassifier.fromPartial(object.defaultClassifier) + : undefined; + message.externalClassifier = + object.externalClassifier !== undefined && + object.externalClassifier !== null + ? 
ExternalEouClassifier.fromPartial(object.externalClassifier) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EouClassifierOptions.$type, EouClassifierOptions); + +const baseRawAudio: object = { + $type: "speechkit.stt.v3.RawAudio", + audioEncoding: 0, + sampleRateHertz: 0, + audioChannelCount: 0, +}; + +export const RawAudio = { + $type: "speechkit.stt.v3.RawAudio" as const, + + encode( + message: RawAudio, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.audioEncoding !== 0) { + writer.uint32(8).int32(message.audioEncoding); + } + if (message.sampleRateHertz !== 0) { + writer.uint32(16).int64(message.sampleRateHertz); + } + if (message.audioChannelCount !== 0) { + writer.uint32(24).int64(message.audioChannelCount); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RawAudio { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRawAudio } as RawAudio; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.audioEncoding = reader.int32() as any; + break; + case 2: + message.sampleRateHertz = longToNumber(reader.int64() as Long); + break; + case 3: + message.audioChannelCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RawAudio { + const message = { ...baseRawAudio } as RawAudio; + message.audioEncoding = + object.audioEncoding !== undefined && object.audioEncoding !== null + ? rawAudio_AudioEncodingFromJSON(object.audioEncoding) + : 0; + message.sampleRateHertz = + object.sampleRateHertz !== undefined && object.sampleRateHertz !== null + ? Number(object.sampleRateHertz) + : 0; + message.audioChannelCount = + object.audioChannelCount !== undefined && + object.audioChannelCount !== null + ? 
Number(object.audioChannelCount) + : 0; + return message; + }, + + toJSON(message: RawAudio): unknown { + const obj: any = {}; + message.audioEncoding !== undefined && + (obj.audioEncoding = rawAudio_AudioEncodingToJSON(message.audioEncoding)); + message.sampleRateHertz !== undefined && + (obj.sampleRateHertz = Math.round(message.sampleRateHertz)); + message.audioChannelCount !== undefined && + (obj.audioChannelCount = Math.round(message.audioChannelCount)); + return obj; + }, + + fromPartial, I>>(object: I): RawAudio { + const message = { ...baseRawAudio } as RawAudio; + message.audioEncoding = object.audioEncoding ?? 0; + message.sampleRateHertz = object.sampleRateHertz ?? 0; + message.audioChannelCount = object.audioChannelCount ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(RawAudio.$type, RawAudio); + +const baseContainerAudio: object = { + $type: "speechkit.stt.v3.ContainerAudio", + containerAudioType: 0, +}; + +export const ContainerAudio = { + $type: "speechkit.stt.v3.ContainerAudio" as const, + + encode( + message: ContainerAudio, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.containerAudioType !== 0) { + writer.uint32(8).int32(message.containerAudioType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ContainerAudio { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseContainerAudio } as ContainerAudio; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.containerAudioType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ContainerAudio { + const message = { ...baseContainerAudio } as ContainerAudio; + message.containerAudioType = + object.containerAudioType !== undefined && + object.containerAudioType !== null + ? 
containerAudio_ContainerAudioTypeFromJSON(object.containerAudioType) + : 0; + return message; + }, + + toJSON(message: ContainerAudio): unknown { + const obj: any = {}; + message.containerAudioType !== undefined && + (obj.containerAudioType = containerAudio_ContainerAudioTypeToJSON( + message.containerAudioType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ContainerAudio { + const message = { ...baseContainerAudio } as ContainerAudio; + message.containerAudioType = object.containerAudioType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ContainerAudio.$type, ContainerAudio); + +const baseAudioFormatOptions: object = { + $type: "speechkit.stt.v3.AudioFormatOptions", +}; + +export const AudioFormatOptions = { + $type: "speechkit.stt.v3.AudioFormatOptions" as const, + + encode( + message: AudioFormatOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.rawAudio !== undefined) { + RawAudio.encode(message.rawAudio, writer.uint32(10).fork()).ldelim(); + } + if (message.containerAudio !== undefined) { + ContainerAudio.encode( + message.containerAudio, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioFormatOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rawAudio = RawAudio.decode(reader, reader.uint32()); + break; + case 2: + message.containerAudio = ContainerAudio.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioFormatOptions { + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + message.rawAudio = + object.rawAudio !== undefined && object.rawAudio !== null + ? RawAudio.fromJSON(object.rawAudio) + : undefined; + message.containerAudio = + object.containerAudio !== undefined && object.containerAudio !== null + ? ContainerAudio.fromJSON(object.containerAudio) + : undefined; + return message; + }, + + toJSON(message: AudioFormatOptions): unknown { + const obj: any = {}; + message.rawAudio !== undefined && + (obj.rawAudio = message.rawAudio + ? RawAudio.toJSON(message.rawAudio) + : undefined); + message.containerAudio !== undefined && + (obj.containerAudio = message.containerAudio + ? ContainerAudio.toJSON(message.containerAudio) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioFormatOptions { + const message = { ...baseAudioFormatOptions } as AudioFormatOptions; + message.rawAudio = + object.rawAudio !== undefined && object.rawAudio !== null + ? RawAudio.fromPartial(object.rawAudio) + : undefined; + message.containerAudio = + object.containerAudio !== undefined && object.containerAudio !== null + ? 
ContainerAudio.fromPartial(object.containerAudio) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(AudioFormatOptions.$type, AudioFormatOptions); + +const baseLanguageRestrictionOptions: object = { + $type: "speechkit.stt.v3.LanguageRestrictionOptions", + restrictionType: 0, + languageCode: "", +}; + +export const LanguageRestrictionOptions = { + $type: "speechkit.stt.v3.LanguageRestrictionOptions" as const, + + encode( + message: LanguageRestrictionOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.restrictionType !== 0) { + writer.uint32(8).int32(message.restrictionType); + } + for (const v of message.languageCode) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LanguageRestrictionOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseLanguageRestrictionOptions, + } as LanguageRestrictionOptions; + message.languageCode = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.restrictionType = reader.int32() as any; + break; + case 2: + message.languageCode.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LanguageRestrictionOptions { + const message = { + ...baseLanguageRestrictionOptions, + } as LanguageRestrictionOptions; + message.restrictionType = + object.restrictionType !== undefined && object.restrictionType !== null + ? languageRestrictionOptions_LanguageRestrictionTypeFromJSON( + object.restrictionType + ) + : 0; + message.languageCode = (object.languageCode ?? 
[]).map((e: any) => + String(e) + ); + return message; + }, + + toJSON(message: LanguageRestrictionOptions): unknown { + const obj: any = {}; + message.restrictionType !== undefined && + (obj.restrictionType = + languageRestrictionOptions_LanguageRestrictionTypeToJSON( + message.restrictionType + )); + if (message.languageCode) { + obj.languageCode = message.languageCode.map((e) => e); + } else { + obj.languageCode = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LanguageRestrictionOptions { + const message = { + ...baseLanguageRestrictionOptions, + } as LanguageRestrictionOptions; + message.restrictionType = object.restrictionType ?? 0; + message.languageCode = object.languageCode?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + LanguageRestrictionOptions.$type, + LanguageRestrictionOptions +); + +const baseRecognitionModelOptions: object = { + $type: "speechkit.stt.v3.RecognitionModelOptions", + model: "", + audioProcessingType: 0, +}; + +export const RecognitionModelOptions = { + $type: "speechkit.stt.v3.RecognitionModelOptions" as const, + + encode( + message: RecognitionModelOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.model !== "") { + writer.uint32(10).string(message.model); + } + if (message.audioFormat !== undefined) { + AudioFormatOptions.encode( + message.audioFormat, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.textNormalization !== undefined) { + TextNormalizationOptions.encode( + message.textNormalization, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.languageRestriction !== undefined) { + LanguageRestrictionOptions.encode( + message.languageRestriction, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.audioProcessingType !== 0) { + writer.uint32(40).int32(message.audioProcessingType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RecognitionModelOptions { + const reader = input 
instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseRecognitionModelOptions, + } as RecognitionModelOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.model = reader.string(); + break; + case 2: + message.audioFormat = AudioFormatOptions.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.textNormalization = TextNormalizationOptions.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.languageRestriction = LanguageRestrictionOptions.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.audioProcessingType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RecognitionModelOptions { + const message = { + ...baseRecognitionModelOptions, + } as RecognitionModelOptions; + message.model = + object.model !== undefined && object.model !== null + ? String(object.model) + : ""; + message.audioFormat = + object.audioFormat !== undefined && object.audioFormat !== null + ? AudioFormatOptions.fromJSON(object.audioFormat) + : undefined; + message.textNormalization = + object.textNormalization !== undefined && + object.textNormalization !== null + ? TextNormalizationOptions.fromJSON(object.textNormalization) + : undefined; + message.languageRestriction = + object.languageRestriction !== undefined && + object.languageRestriction !== null + ? LanguageRestrictionOptions.fromJSON(object.languageRestriction) + : undefined; + message.audioProcessingType = + object.audioProcessingType !== undefined && + object.audioProcessingType !== null + ? 
recognitionModelOptions_AudioProcessingTypeFromJSON( + object.audioProcessingType + ) + : 0; + return message; + }, + + toJSON(message: RecognitionModelOptions): unknown { + const obj: any = {}; + message.model !== undefined && (obj.model = message.model); + message.audioFormat !== undefined && + (obj.audioFormat = message.audioFormat + ? AudioFormatOptions.toJSON(message.audioFormat) + : undefined); + message.textNormalization !== undefined && + (obj.textNormalization = message.textNormalization + ? TextNormalizationOptions.toJSON(message.textNormalization) + : undefined); + message.languageRestriction !== undefined && + (obj.languageRestriction = message.languageRestriction + ? LanguageRestrictionOptions.toJSON(message.languageRestriction) + : undefined); + message.audioProcessingType !== undefined && + (obj.audioProcessingType = + recognitionModelOptions_AudioProcessingTypeToJSON( + message.audioProcessingType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): RecognitionModelOptions { + const message = { + ...baseRecognitionModelOptions, + } as RecognitionModelOptions; + message.model = object.model ?? ""; + message.audioFormat = + object.audioFormat !== undefined && object.audioFormat !== null + ? AudioFormatOptions.fromPartial(object.audioFormat) + : undefined; + message.textNormalization = + object.textNormalization !== undefined && + object.textNormalization !== null + ? TextNormalizationOptions.fromPartial(object.textNormalization) + : undefined; + message.languageRestriction = + object.languageRestriction !== undefined && + object.languageRestriction !== null + ? LanguageRestrictionOptions.fromPartial(object.languageRestriction) + : undefined; + message.audioProcessingType = object.audioProcessingType ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(RecognitionModelOptions.$type, RecognitionModelOptions); + +const baseStreamingOptions: object = { + $type: "speechkit.stt.v3.StreamingOptions", +}; + +export const StreamingOptions = { + $type: "speechkit.stt.v3.StreamingOptions" as const, + + encode( + message: StreamingOptions, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recognitionModel !== undefined) { + RecognitionModelOptions.encode( + message.recognitionModel, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.eouClassifier !== undefined) { + EouClassifierOptions.encode( + message.eouClassifier, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingOptions { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStreamingOptions } as StreamingOptions; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recognitionModel = RecognitionModelOptions.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.eouClassifier = EouClassifierOptions.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingOptions { + const message = { ...baseStreamingOptions } as StreamingOptions; + message.recognitionModel = + object.recognitionModel !== undefined && object.recognitionModel !== null + ? RecognitionModelOptions.fromJSON(object.recognitionModel) + : undefined; + message.eouClassifier = + object.eouClassifier !== undefined && object.eouClassifier !== null + ? 
EouClassifierOptions.fromJSON(object.eouClassifier) + : undefined; + return message; + }, + + toJSON(message: StreamingOptions): unknown { + const obj: any = {}; + message.recognitionModel !== undefined && + (obj.recognitionModel = message.recognitionModel + ? RecognitionModelOptions.toJSON(message.recognitionModel) + : undefined); + message.eouClassifier !== undefined && + (obj.eouClassifier = message.eouClassifier + ? EouClassifierOptions.toJSON(message.eouClassifier) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamingOptions { + const message = { ...baseStreamingOptions } as StreamingOptions; + message.recognitionModel = + object.recognitionModel !== undefined && object.recognitionModel !== null + ? RecognitionModelOptions.fromPartial(object.recognitionModel) + : undefined; + message.eouClassifier = + object.eouClassifier !== undefined && object.eouClassifier !== null + ? EouClassifierOptions.fromPartial(object.eouClassifier) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(StreamingOptions.$type, StreamingOptions); + +const baseAudioChunk: object = { $type: "speechkit.stt.v3.AudioChunk" }; + +export const AudioChunk = { + $type: "speechkit.stt.v3.AudioChunk" as const, + + encode( + message: AudioChunk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.data.length !== 0) { + writer.uint32(10).bytes(message.data); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioChunk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAudioChunk } as AudioChunk; + message.data = Buffer.alloc(0); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data = reader.bytes() as Buffer; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioChunk { + const message = { ...baseAudioChunk } as AudioChunk; + message.data = + object.data !== undefined && object.data !== null + ? Buffer.from(bytesFromBase64(object.data)) + : Buffer.alloc(0); + return message; + }, + + toJSON(message: AudioChunk): unknown { + const obj: any = {}; + message.data !== undefined && + (obj.data = base64FromBytes( + message.data !== undefined ? message.data : Buffer.alloc(0) + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioChunk { + const message = { ...baseAudioChunk } as AudioChunk; + message.data = object.data ?? Buffer.alloc(0); + return message; + }, +}; + +messageTypeRegistry.set(AudioChunk.$type, AudioChunk); + +const baseSilenceChunk: object = { + $type: "speechkit.stt.v3.SilenceChunk", + durationMs: 0, +}; + +export const SilenceChunk = { + $type: "speechkit.stt.v3.SilenceChunk" as const, + + encode( + message: SilenceChunk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.durationMs !== 0) { + writer.uint32(8).int64(message.durationMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SilenceChunk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSilenceChunk } as SilenceChunk; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.durationMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SilenceChunk { + const message = { ...baseSilenceChunk } as SilenceChunk; + message.durationMs = + object.durationMs !== undefined && object.durationMs !== null + ? Number(object.durationMs) + : 0; + return message; + }, + + toJSON(message: SilenceChunk): unknown { + const obj: any = {}; + message.durationMs !== undefined && + (obj.durationMs = Math.round(message.durationMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): SilenceChunk { + const message = { ...baseSilenceChunk } as SilenceChunk; + message.durationMs = object.durationMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(SilenceChunk.$type, SilenceChunk); + +const baseEou: object = { $type: "speechkit.stt.v3.Eou" }; + +export const Eou = { + $type: "speechkit.stt.v3.Eou" as const, + + encode(_: Eou, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Eou { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEou } as Eou; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): Eou { + const message = { ...baseEou } as Eou; + return message; + }, + + toJSON(_: Eou): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>(_: I): Eou { + const message = { ...baseEou } as Eou; + return message; + }, +}; + +messageTypeRegistry.set(Eou.$type, Eou); + +const baseStreamingRequest: object = { + $type: "speechkit.stt.v3.StreamingRequest", +}; + +export const StreamingRequest = { + $type: "speechkit.stt.v3.StreamingRequest" as const, + + encode( + message: StreamingRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sessionOptions !== undefined) { + StreamingOptions.encode( + message.sessionOptions, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.chunk !== undefined) { + AudioChunk.encode(message.chunk, writer.uint32(18).fork()).ldelim(); + } + if (message.silenceChunk !== undefined) { + SilenceChunk.encode( + message.silenceChunk, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.eou !== undefined) { + Eou.encode(message.eou, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseStreamingRequest } as StreamingRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sessionOptions = StreamingOptions.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.chunk = AudioChunk.decode(reader, reader.uint32()); + break; + case 3: + message.silenceChunk = SilenceChunk.decode(reader, reader.uint32()); + break; + case 4: + message.eou = Eou.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingRequest { + const message = { ...baseStreamingRequest } as StreamingRequest; + message.sessionOptions = + object.sessionOptions !== undefined && object.sessionOptions !== null + ? StreamingOptions.fromJSON(object.sessionOptions) + : undefined; + message.chunk = + object.chunk !== undefined && object.chunk !== null + ? AudioChunk.fromJSON(object.chunk) + : undefined; + message.silenceChunk = + object.silenceChunk !== undefined && object.silenceChunk !== null + ? SilenceChunk.fromJSON(object.silenceChunk) + : undefined; + message.eou = + object.eou !== undefined && object.eou !== null + ? Eou.fromJSON(object.eou) + : undefined; + return message; + }, + + toJSON(message: StreamingRequest): unknown { + const obj: any = {}; + message.sessionOptions !== undefined && + (obj.sessionOptions = message.sessionOptions + ? StreamingOptions.toJSON(message.sessionOptions) + : undefined); + message.chunk !== undefined && + (obj.chunk = message.chunk + ? AudioChunk.toJSON(message.chunk) + : undefined); + message.silenceChunk !== undefined && + (obj.silenceChunk = message.silenceChunk + ? SilenceChunk.toJSON(message.silenceChunk) + : undefined); + message.eou !== undefined && + (obj.eou = message.eou ? 
Eou.toJSON(message.eou) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamingRequest { + const message = { ...baseStreamingRequest } as StreamingRequest; + message.sessionOptions = + object.sessionOptions !== undefined && object.sessionOptions !== null + ? StreamingOptions.fromPartial(object.sessionOptions) + : undefined; + message.chunk = + object.chunk !== undefined && object.chunk !== null + ? AudioChunk.fromPartial(object.chunk) + : undefined; + message.silenceChunk = + object.silenceChunk !== undefined && object.silenceChunk !== null + ? SilenceChunk.fromPartial(object.silenceChunk) + : undefined; + message.eou = + object.eou !== undefined && object.eou !== null + ? Eou.fromPartial(object.eou) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(StreamingRequest.$type, StreamingRequest); + +const baseWord: object = { + $type: "speechkit.stt.v3.Word", + text: "", + startTimeMs: 0, + endTimeMs: 0, +}; + +export const Word = { + $type: "speechkit.stt.v3.Word" as const, + + encode(message: Word, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.text !== "") { + writer.uint32(10).string(message.text); + } + if (message.startTimeMs !== 0) { + writer.uint32(16).int64(message.startTimeMs); + } + if (message.endTimeMs !== 0) { + writer.uint32(24).int64(message.endTimeMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Word { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseWord } as Word; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.text = reader.string(); + break; + case 2: + message.startTimeMs = longToNumber(reader.int64() as Long); + break; + case 3: + message.endTimeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Word { + const message = { ...baseWord } as Word; + message.text = + object.text !== undefined && object.text !== null + ? String(object.text) + : ""; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) + : 0; + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + return message; + }, + + toJSON(message: Word): unknown { + const obj: any = {}; + message.text !== undefined && (obj.text = message.text); + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + return obj; + }, + + fromPartial, I>>(object: I): Word { + const message = { ...baseWord } as Word; + message.text = object.text ?? ""; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Word.$type, Word); + +const baseAlternative: object = { + $type: "speechkit.stt.v3.Alternative", + text: "", + startTimeMs: 0, + endTimeMs: 0, + confidence: 0, +}; + +export const Alternative = { + $type: "speechkit.stt.v3.Alternative" as const, + + encode( + message: Alternative, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.words) { + Word.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.text !== "") { + writer.uint32(18).string(message.text); + } + if (message.startTimeMs !== 0) { + writer.uint32(24).int64(message.startTimeMs); + } + if (message.endTimeMs !== 0) { + writer.uint32(32).int64(message.endTimeMs); + } + if (message.confidence !== 0) { + writer.uint32(41).double(message.confidence); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Alternative { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAlternative } as Alternative; + message.words = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.words.push(Word.decode(reader, reader.uint32())); + break; + case 2: + message.text = reader.string(); + break; + case 3: + message.startTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.endTimeMs = longToNumber(reader.int64() as Long); + break; + case 5: + message.confidence = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Alternative { + const message = { ...baseAlternative } as Alternative; + message.words = (object.words ?? []).map((e: any) => Word.fromJSON(e)); + message.text = + object.text !== undefined && object.text !== null + ? 
String(object.text) + : ""; + message.startTimeMs = + object.startTimeMs !== undefined && object.startTimeMs !== null + ? Number(object.startTimeMs) + : 0; + message.endTimeMs = + object.endTimeMs !== undefined && object.endTimeMs !== null + ? Number(object.endTimeMs) + : 0; + message.confidence = + object.confidence !== undefined && object.confidence !== null + ? Number(object.confidence) + : 0; + return message; + }, + + toJSON(message: Alternative): unknown { + const obj: any = {}; + if (message.words) { + obj.words = message.words.map((e) => (e ? Word.toJSON(e) : undefined)); + } else { + obj.words = []; + } + message.text !== undefined && (obj.text = message.text); + message.startTimeMs !== undefined && + (obj.startTimeMs = Math.round(message.startTimeMs)); + message.endTimeMs !== undefined && + (obj.endTimeMs = Math.round(message.endTimeMs)); + message.confidence !== undefined && (obj.confidence = message.confidence); + return obj; + }, + + fromPartial, I>>( + object: I + ): Alternative { + const message = { ...baseAlternative } as Alternative; + message.words = object.words?.map((e) => Word.fromPartial(e)) || []; + message.text = object.text ?? ""; + message.startTimeMs = object.startTimeMs ?? 0; + message.endTimeMs = object.endTimeMs ?? 0; + message.confidence = object.confidence ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Alternative.$type, Alternative); + +const baseEouUpdate: object = { + $type: "speechkit.stt.v3.EouUpdate", + timeMs: 0, +}; + +export const EouUpdate = { + $type: "speechkit.stt.v3.EouUpdate" as const, + + encode( + message: EouUpdate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timeMs !== 0) { + writer.uint32(16).int64(message.timeMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): EouUpdate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseEouUpdate } as EouUpdate; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.timeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): EouUpdate { + const message = { ...baseEouUpdate } as EouUpdate; + message.timeMs = + object.timeMs !== undefined && object.timeMs !== null + ? Number(object.timeMs) + : 0; + return message; + }, + + toJSON(message: EouUpdate): unknown { + const obj: any = {}; + message.timeMs !== undefined && (obj.timeMs = Math.round(message.timeMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): EouUpdate { + const message = { ...baseEouUpdate } as EouUpdate; + message.timeMs = object.timeMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(EouUpdate.$type, EouUpdate); + +const baseAlternativeUpdate: object = { + $type: "speechkit.stt.v3.AlternativeUpdate", + channelTag: "", +}; + +export const AlternativeUpdate = { + $type: "speechkit.stt.v3.AlternativeUpdate" as const, + + encode( + message: AlternativeUpdate, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.alternatives) { + Alternative.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.channelTag !== "") { + writer.uint32(18).string(message.channelTag); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AlternativeUpdate { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAlternativeUpdate } as AlternativeUpdate; + message.alternatives = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alternatives.push( + Alternative.decode(reader, reader.uint32()) + ); + break; + case 2: + message.channelTag = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AlternativeUpdate { + const message = { ...baseAlternativeUpdate } as AlternativeUpdate; + message.alternatives = (object.alternatives ?? []).map((e: any) => + Alternative.fromJSON(e) + ); + message.channelTag = + object.channelTag !== undefined && object.channelTag !== null + ? String(object.channelTag) + : ""; + return message; + }, + + toJSON(message: AlternativeUpdate): unknown { + const obj: any = {}; + if (message.alternatives) { + obj.alternatives = message.alternatives.map((e) => + e ? Alternative.toJSON(e) : undefined + ); + } else { + obj.alternatives = []; + } + message.channelTag !== undefined && (obj.channelTag = message.channelTag); + return obj; + }, + + fromPartial, I>>( + object: I + ): AlternativeUpdate { + const message = { ...baseAlternativeUpdate } as AlternativeUpdate; + message.alternatives = + object.alternatives?.map((e) => Alternative.fromPartial(e)) || []; + message.channelTag = object.channelTag ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(AlternativeUpdate.$type, AlternativeUpdate); + +const baseAudioCursors: object = { + $type: "speechkit.stt.v3.AudioCursors", + receivedDataMs: 0, + resetTimeMs: 0, + partialTimeMs: 0, + finalTimeMs: 0, + finalIndex: 0, + eouTimeMs: 0, +}; + +export const AudioCursors = { + $type: "speechkit.stt.v3.AudioCursors" as const, + + encode( + message: AudioCursors, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.receivedDataMs !== 0) { + writer.uint32(8).int64(message.receivedDataMs); + } + if (message.resetTimeMs !== 0) { + writer.uint32(16).int64(message.resetTimeMs); + } + if (message.partialTimeMs !== 0) { + writer.uint32(24).int64(message.partialTimeMs); + } + if (message.finalTimeMs !== 0) { + writer.uint32(32).int64(message.finalTimeMs); + } + if (message.finalIndex !== 0) { + writer.uint32(40).int64(message.finalIndex); + } + if (message.eouTimeMs !== 0) { + writer.uint32(48).int64(message.eouTimeMs); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AudioCursors { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAudioCursors } as AudioCursors; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.receivedDataMs = longToNumber(reader.int64() as Long); + break; + case 2: + message.resetTimeMs = longToNumber(reader.int64() as Long); + break; + case 3: + message.partialTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.finalTimeMs = longToNumber(reader.int64() as Long); + break; + case 5: + message.finalIndex = longToNumber(reader.int64() as Long); + break; + case 6: + message.eouTimeMs = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AudioCursors { + const message = { ...baseAudioCursors } as AudioCursors; + message.receivedDataMs = + object.receivedDataMs !== undefined && object.receivedDataMs !== null + ? Number(object.receivedDataMs) + : 0; + message.resetTimeMs = + object.resetTimeMs !== undefined && object.resetTimeMs !== null + ? Number(object.resetTimeMs) + : 0; + message.partialTimeMs = + object.partialTimeMs !== undefined && object.partialTimeMs !== null + ? Number(object.partialTimeMs) + : 0; + message.finalTimeMs = + object.finalTimeMs !== undefined && object.finalTimeMs !== null + ? Number(object.finalTimeMs) + : 0; + message.finalIndex = + object.finalIndex !== undefined && object.finalIndex !== null + ? Number(object.finalIndex) + : 0; + message.eouTimeMs = + object.eouTimeMs !== undefined && object.eouTimeMs !== null + ? 
Number(object.eouTimeMs) + : 0; + return message; + }, + + toJSON(message: AudioCursors): unknown { + const obj: any = {}; + message.receivedDataMs !== undefined && + (obj.receivedDataMs = Math.round(message.receivedDataMs)); + message.resetTimeMs !== undefined && + (obj.resetTimeMs = Math.round(message.resetTimeMs)); + message.partialTimeMs !== undefined && + (obj.partialTimeMs = Math.round(message.partialTimeMs)); + message.finalTimeMs !== undefined && + (obj.finalTimeMs = Math.round(message.finalTimeMs)); + message.finalIndex !== undefined && + (obj.finalIndex = Math.round(message.finalIndex)); + message.eouTimeMs !== undefined && + (obj.eouTimeMs = Math.round(message.eouTimeMs)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AudioCursors { + const message = { ...baseAudioCursors } as AudioCursors; + message.receivedDataMs = object.receivedDataMs ?? 0; + message.resetTimeMs = object.resetTimeMs ?? 0; + message.partialTimeMs = object.partialTimeMs ?? 0; + message.finalTimeMs = object.finalTimeMs ?? 0; + message.finalIndex = object.finalIndex ?? 0; + message.eouTimeMs = object.eouTimeMs ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(AudioCursors.$type, AudioCursors); + +const baseFinalRefinement: object = { + $type: "speechkit.stt.v3.FinalRefinement", + finalIndex: 0, +}; + +export const FinalRefinement = { + $type: "speechkit.stt.v3.FinalRefinement" as const, + + encode( + message: FinalRefinement, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.finalIndex !== 0) { + writer.uint32(8).int64(message.finalIndex); + } + if (message.normalizedText !== undefined) { + AlternativeUpdate.encode( + message.normalizedText, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): FinalRefinement { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseFinalRefinement } as FinalRefinement; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.finalIndex = longToNumber(reader.int64() as Long); + break; + case 2: + message.normalizedText = AlternativeUpdate.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): FinalRefinement { + const message = { ...baseFinalRefinement } as FinalRefinement; + message.finalIndex = + object.finalIndex !== undefined && object.finalIndex !== null + ? Number(object.finalIndex) + : 0; + message.normalizedText = + object.normalizedText !== undefined && object.normalizedText !== null + ? AlternativeUpdate.fromJSON(object.normalizedText) + : undefined; + return message; + }, + + toJSON(message: FinalRefinement): unknown { + const obj: any = {}; + message.finalIndex !== undefined && + (obj.finalIndex = Math.round(message.finalIndex)); + message.normalizedText !== undefined && + (obj.normalizedText = message.normalizedText + ? AlternativeUpdate.toJSON(message.normalizedText) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): FinalRefinement { + const message = { ...baseFinalRefinement } as FinalRefinement; + message.finalIndex = object.finalIndex ?? 0; + message.normalizedText = + object.normalizedText !== undefined && object.normalizedText !== null + ? 
AlternativeUpdate.fromPartial(object.normalizedText) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(FinalRefinement.$type, FinalRefinement); + +const baseStatusCode: object = { + $type: "speechkit.stt.v3.StatusCode", + codeType: 0, + message: "", +}; + +export const StatusCode = { + $type: "speechkit.stt.v3.StatusCode" as const, + + encode( + message: StatusCode, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.codeType !== 0) { + writer.uint32(8).int32(message.codeType); + } + if (message.message !== "") { + writer.uint32(18).string(message.message); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StatusCode { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseStatusCode } as StatusCode; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.codeType = reader.int32() as any; + break; + case 2: + message.message = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StatusCode { + const message = { ...baseStatusCode } as StatusCode; + message.codeType = + object.codeType !== undefined && object.codeType !== null + ? codeTypeFromJSON(object.codeType) + : 0; + message.message = + object.message !== undefined && object.message !== null + ? String(object.message) + : ""; + return message; + }, + + toJSON(message: StatusCode): unknown { + const obj: any = {}; + message.codeType !== undefined && + (obj.codeType = codeTypeToJSON(message.codeType)); + message.message !== undefined && (obj.message = message.message); + return obj; + }, + + fromPartial, I>>( + object: I + ): StatusCode { + const message = { ...baseStatusCode } as StatusCode; + message.codeType = object.codeType ?? 0; + message.message = object.message ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(StatusCode.$type, StatusCode); + +const baseSessionUuid: object = { + $type: "speechkit.stt.v3.SessionUuid", + uuid: "", + userRequestId: "", +}; + +export const SessionUuid = { + $type: "speechkit.stt.v3.SessionUuid" as const, + + encode( + message: SessionUuid, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uuid !== "") { + writer.uint32(10).string(message.uuid); + } + if (message.userRequestId !== "") { + writer.uint32(18).string(message.userRequestId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SessionUuid { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSessionUuid } as SessionUuid; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uuid = reader.string(); + break; + case 2: + message.userRequestId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SessionUuid { + const message = { ...baseSessionUuid } as SessionUuid; + message.uuid = + object.uuid !== undefined && object.uuid !== null + ? String(object.uuid) + : ""; + message.userRequestId = + object.userRequestId !== undefined && object.userRequestId !== null + ? String(object.userRequestId) + : ""; + return message; + }, + + toJSON(message: SessionUuid): unknown { + const obj: any = {}; + message.uuid !== undefined && (obj.uuid = message.uuid); + message.userRequestId !== undefined && + (obj.userRequestId = message.userRequestId); + return obj; + }, + + fromPartial, I>>( + object: I + ): SessionUuid { + const message = { ...baseSessionUuid } as SessionUuid; + message.uuid = object.uuid ?? ""; + message.userRequestId = object.userRequestId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(SessionUuid.$type, SessionUuid); + +const baseStreamingResponse: object = { + $type: "speechkit.stt.v3.StreamingResponse", + responseWallTimeMs: 0, +}; + +export const StreamingResponse = { + $type: "speechkit.stt.v3.StreamingResponse" as const, + + encode( + message: StreamingResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sessionUuid !== undefined) { + SessionUuid.encode( + message.sessionUuid, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.audioCursors !== undefined) { + AudioCursors.encode( + message.audioCursors, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.responseWallTimeMs !== 0) { + writer.uint32(24).int64(message.responseWallTimeMs); + } + if (message.partial !== undefined) { + AlternativeUpdate.encode( + message.partial, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.final !== undefined) { + AlternativeUpdate.encode( + message.final, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.eouUpdate !== undefined) { + EouUpdate.encode(message.eouUpdate, writer.uint32(50).fork()).ldelim(); + } + if (message.finalRefinement !== undefined) { + FinalRefinement.encode( + message.finalRefinement, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.statusCode !== undefined) { + StatusCode.encode(message.statusCode, writer.uint32(66).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StreamingResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseStreamingResponse } as StreamingResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sessionUuid = SessionUuid.decode(reader, reader.uint32()); + break; + case 2: + message.audioCursors = AudioCursors.decode(reader, reader.uint32()); + break; + case 3: + message.responseWallTimeMs = longToNumber(reader.int64() as Long); + break; + case 4: + message.partial = AlternativeUpdate.decode(reader, reader.uint32()); + break; + case 5: + message.final = AlternativeUpdate.decode(reader, reader.uint32()); + break; + case 6: + message.eouUpdate = EouUpdate.decode(reader, reader.uint32()); + break; + case 7: + message.finalRefinement = FinalRefinement.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.statusCode = StatusCode.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): StreamingResponse { + const message = { ...baseStreamingResponse } as StreamingResponse; + message.sessionUuid = + object.sessionUuid !== undefined && object.sessionUuid !== null + ? SessionUuid.fromJSON(object.sessionUuid) + : undefined; + message.audioCursors = + object.audioCursors !== undefined && object.audioCursors !== null + ? AudioCursors.fromJSON(object.audioCursors) + : undefined; + message.responseWallTimeMs = + object.responseWallTimeMs !== undefined && + object.responseWallTimeMs !== null + ? Number(object.responseWallTimeMs) + : 0; + message.partial = + object.partial !== undefined && object.partial !== null + ? AlternativeUpdate.fromJSON(object.partial) + : undefined; + message.final = + object.final !== undefined && object.final !== null + ? AlternativeUpdate.fromJSON(object.final) + : undefined; + message.eouUpdate = + object.eouUpdate !== undefined && object.eouUpdate !== null + ? 
EouUpdate.fromJSON(object.eouUpdate) + : undefined; + message.finalRefinement = + object.finalRefinement !== undefined && object.finalRefinement !== null + ? FinalRefinement.fromJSON(object.finalRefinement) + : undefined; + message.statusCode = + object.statusCode !== undefined && object.statusCode !== null + ? StatusCode.fromJSON(object.statusCode) + : undefined; + return message; + }, + + toJSON(message: StreamingResponse): unknown { + const obj: any = {}; + message.sessionUuid !== undefined && + (obj.sessionUuid = message.sessionUuid + ? SessionUuid.toJSON(message.sessionUuid) + : undefined); + message.audioCursors !== undefined && + (obj.audioCursors = message.audioCursors + ? AudioCursors.toJSON(message.audioCursors) + : undefined); + message.responseWallTimeMs !== undefined && + (obj.responseWallTimeMs = Math.round(message.responseWallTimeMs)); + message.partial !== undefined && + (obj.partial = message.partial + ? AlternativeUpdate.toJSON(message.partial) + : undefined); + message.final !== undefined && + (obj.final = message.final + ? AlternativeUpdate.toJSON(message.final) + : undefined); + message.eouUpdate !== undefined && + (obj.eouUpdate = message.eouUpdate + ? EouUpdate.toJSON(message.eouUpdate) + : undefined); + message.finalRefinement !== undefined && + (obj.finalRefinement = message.finalRefinement + ? FinalRefinement.toJSON(message.finalRefinement) + : undefined); + message.statusCode !== undefined && + (obj.statusCode = message.statusCode + ? StatusCode.toJSON(message.statusCode) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): StreamingResponse { + const message = { ...baseStreamingResponse } as StreamingResponse; + message.sessionUuid = + object.sessionUuid !== undefined && object.sessionUuid !== null + ? SessionUuid.fromPartial(object.sessionUuid) + : undefined; + message.audioCursors = + object.audioCursors !== undefined && object.audioCursors !== null + ? 
AudioCursors.fromPartial(object.audioCursors) + : undefined; + message.responseWallTimeMs = object.responseWallTimeMs ?? 0; + message.partial = + object.partial !== undefined && object.partial !== null + ? AlternativeUpdate.fromPartial(object.partial) + : undefined; + message.final = + object.final !== undefined && object.final !== null + ? AlternativeUpdate.fromPartial(object.final) + : undefined; + message.eouUpdate = + object.eouUpdate !== undefined && object.eouUpdate !== null + ? EouUpdate.fromPartial(object.eouUpdate) + : undefined; + message.finalRefinement = + object.finalRefinement !== undefined && object.finalRefinement !== null + ? FinalRefinement.fromPartial(object.finalRefinement) + : undefined; + message.statusCode = + object.statusCode !== undefined && object.statusCode !== null + ? StatusCode.fromPartial(object.statusCode) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(StreamingResponse.$type, StreamingResponse); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + 
} + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts b/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts new file mode 100644 index 00000000..56c6cc74 --- /dev/null +++ b/src/generated/yandex/cloud/ai/stt/v3/stt_service.ts @@ -0,0 +1,73 @@ +/* eslint-disable */ +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleBidiStreamingCall, + Client, + ClientDuplexStream, + CallOptions, + Metadata, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + StreamingRequest, + StreamingResponse, +} from "../../../../../yandex/cloud/ai/stt/v3/stt"; + +export const protobufPackage = "speechkit.stt.v3"; + +/** A set of methods for voice recognition. 
*/ +export const RecognizerService = { + /** Expects audio in real-time */ + recognizeStreaming: { + path: "/speechkit.stt.v3.Recognizer/RecognizeStreaming", + requestStream: true, + responseStream: true, + requestSerialize: (value: StreamingRequest) => + Buffer.from(StreamingRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StreamingRequest.decode(value), + responseSerialize: (value: StreamingResponse) => + Buffer.from(StreamingResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => StreamingResponse.decode(value), + }, +} as const; + +export interface RecognizerServer extends UntypedServiceImplementation { + /** Expects audio in real-time */ + recognizeStreaming: handleBidiStreamingCall< + StreamingRequest, + StreamingResponse + >; +} + +export interface RecognizerClient extends Client { + /** Expects audio in real-time */ + recognizeStreaming(): ClientDuplexStream; + recognizeStreaming( + options: Partial + ): ClientDuplexStream; + recognizeStreaming( + metadata: Metadata, + options?: Partial + ): ClientDuplexStream; +} + +export const RecognizerClient = makeGenericClientConstructor( + RecognizerService, + "speechkit.stt.v3.Recognizer" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): RecognizerClient; + service: typeof RecognizerService; +}; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts index 6c0239af..35032682 100644 --- a/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts +++ b/src/generated/yandex/cloud/ai/translate/v2/translation_service.ts @@ -52,6 +52,8 @@ export interface TranslateRequest { model: string; /** Glossary to be applied for the translation. For more information, see [Glossaries](/docs/translate/concepts/glossary). 
*/ glossaryConfig?: TranslateGlossaryConfig; + /** use speller */ + speller: boolean; } export enum TranslateRequest_Format { @@ -182,6 +184,7 @@ const baseTranslateRequest: object = { texts: "", folderId: "", model: "", + speller: false, }; export const TranslateRequest = { @@ -215,6 +218,9 @@ export const TranslateRequest = { writer.uint32(58).fork() ).ldelim(); } + if (message.speller === true) { + writer.uint32(64).bool(message.speller); + } return writer; }, @@ -250,6 +256,9 @@ export const TranslateRequest = { reader.uint32() ); break; + case 8: + message.speller = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -287,6 +296,10 @@ export const TranslateRequest = { object.glossaryConfig !== undefined && object.glossaryConfig !== null ? TranslateGlossaryConfig.fromJSON(object.glossaryConfig) : undefined; + message.speller = + object.speller !== undefined && object.speller !== null + ? Boolean(object.speller) + : false; return message; }, @@ -309,6 +322,7 @@ export const TranslateRequest = { (obj.glossaryConfig = message.glossaryConfig ? TranslateGlossaryConfig.toJSON(message.glossaryConfig) : undefined); + message.speller !== undefined && (obj.speller = message.speller); return obj; }, @@ -326,6 +340,7 @@ export const TranslateRequest = { object.glossaryConfig !== undefined && object.glossaryConfig !== null ? TranslateGlossaryConfig.fromPartial(object.glossaryConfig) : undefined; + message.speller = object.speller ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/ai/tts/v3/tts.ts b/src/generated/yandex/cloud/ai/tts/v3/tts.ts index dc8c1b19..8db6f39d 100644 --- a/src/generated/yandex/cloud/ai/tts/v3/tts.ts +++ b/src/generated/yandex/cloud/ai/tts/v3/tts.ts @@ -76,6 +76,7 @@ export enum ContainerAudio_ContainerAudioType { /** WAV - Audio bit depth 16-bit signed little-endian (Linear PCM). 
*/ WAV = 1, OGG_OPUS = 2, + MP3 = 3, UNRECOGNIZED = -1, } @@ -92,6 +93,9 @@ export function containerAudio_ContainerAudioTypeFromJSON( case 2: case "OGG_OPUS": return ContainerAudio_ContainerAudioType.OGG_OPUS; + case 3: + case "MP3": + return ContainerAudio_ContainerAudioType.MP3; case -1: case "UNRECOGNIZED": default: @@ -109,6 +113,8 @@ export function containerAudio_ContainerAudioTypeToJSON( return "WAV"; case ContainerAudio_ContainerAudioType.OGG_OPUS: return "OGG_OPUS"; + case ContainerAudio_ContainerAudioType.MP3: + return "MP3"; default: return "UNKNOWN"; } @@ -180,16 +186,11 @@ export interface Hints { speed: number | undefined; /** hint to regulate volume. For LOUDNESS_NORMALIZATION_TYPE_UNSPECIFIED normalization will use MAX_PEAK, if volume in (0, 1], LUFS if volume in [-145, 0). */ volume: number | undefined; + role: string | undefined; } export interface UtteranceSynthesisRequest { $type: "speechkit.tts.v3.UtteranceSynthesisRequest"; - /** - * The name of the model. - * - * Specifies basic synthesis functionality. Currently should be empty. - */ - model: string; /** Raw text (e.g. "Hello, Alice"). */ text: string | undefined; /** Text template instance, e.g. `{"Hello, {username}" with username="Alice"}`. */ @@ -1084,6 +1085,9 @@ export const Hints = { if (message.volume !== undefined) { writer.uint32(33).double(message.volume); } + if (message.role !== undefined) { + writer.uint32(42).string(message.role); + } return writer; }, @@ -1106,6 +1110,9 @@ export const Hints = { case 4: message.volume = reader.double(); break; + case 5: + message.role = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -1132,6 +1139,10 @@ export const Hints = { object.volume !== undefined && object.volume !== null ? Number(object.volume) : undefined; + message.role = + object.role !== undefined && object.role !== null + ? 
String(object.role) + : undefined; return message; }, @@ -1144,6 +1155,7 @@ export const Hints = { : undefined); message.speed !== undefined && (obj.speed = message.speed); message.volume !== undefined && (obj.volume = message.volume); + message.role !== undefined && (obj.role = message.role); return obj; }, @@ -1156,6 +1168,7 @@ export const Hints = { : undefined; message.speed = object.speed ?? undefined; message.volume = object.volume ?? undefined; + message.role = object.role ?? undefined; return message; }, }; @@ -1164,7 +1177,6 @@ messageTypeRegistry.set(Hints.$type, Hints); const baseUtteranceSynthesisRequest: object = { $type: "speechkit.tts.v3.UtteranceSynthesisRequest", - model: "", loudnessNormalizationType: 0, unsafeMode: false, }; @@ -1176,9 +1188,6 @@ export const UtteranceSynthesisRequest = { message: UtteranceSynthesisRequest, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.model !== "") { - writer.uint32(10).string(message.model); - } if (message.text !== undefined) { writer.uint32(18).string(message.text); } @@ -1219,9 +1228,6 @@ export const UtteranceSynthesisRequest = { while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { - case 1: - message.model = reader.string(); - break; case 2: message.text = reader.string(); break; @@ -1255,10 +1261,6 @@ export const UtteranceSynthesisRequest = { const message = { ...baseUtteranceSynthesisRequest, } as UtteranceSynthesisRequest; - message.model = - object.model !== undefined && object.model !== null - ? String(object.model) - : ""; message.text = object.text !== undefined && object.text !== null ? 
String(object.text) @@ -1288,7 +1290,6 @@ export const UtteranceSynthesisRequest = { toJSON(message: UtteranceSynthesisRequest): unknown { const obj: any = {}; - message.model !== undefined && (obj.model = message.model); message.text !== undefined && (obj.text = message.text); message.textTemplate !== undefined && (obj.textTemplate = message.textTemplate @@ -1318,7 +1319,6 @@ export const UtteranceSynthesisRequest = { const message = { ...baseUtteranceSynthesisRequest, } as UtteranceSynthesisRequest; - message.model = object.model ?? ""; message.text = object.text ?? undefined; message.textTemplate = object.textTemplate !== undefined && object.textTemplate !== null diff --git a/src/generated/yandex/cloud/ai/vision/v2/image.ts b/src/generated/yandex/cloud/ai/vision/v2/image.ts new file mode 100644 index 00000000..1f0f05fc --- /dev/null +++ b/src/generated/yandex/cloud/ai/vision/v2/image.ts @@ -0,0 +1,193 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.ai.vision.v2"; + +export interface Image { + $type: "yandex.cloud.ai.vision.v2.Image"; + /** bytes with data */ + content: Buffer | undefined; + /** type of data */ + imageType: Image_ImageType; +} + +/** type of image */ +export enum Image_ImageType { + IMAGE_TYPE_UNSPECIFIED = 0, + JPEG = 1, + PNG = 2, + UNRECOGNIZED = -1, +} + +export function image_ImageTypeFromJSON(object: any): Image_ImageType { + switch (object) { + case 0: + case "IMAGE_TYPE_UNSPECIFIED": + return Image_ImageType.IMAGE_TYPE_UNSPECIFIED; + case 1: + case "JPEG": + return Image_ImageType.JPEG; + case 2: + case "PNG": + return Image_ImageType.PNG; + case -1: + case "UNRECOGNIZED": + default: + return Image_ImageType.UNRECOGNIZED; + } +} + +export function image_ImageTypeToJSON(object: Image_ImageType): string { + switch (object) { + case Image_ImageType.IMAGE_TYPE_UNSPECIFIED: + return 
"IMAGE_TYPE_UNSPECIFIED"; + case Image_ImageType.JPEG: + return "JPEG"; + case Image_ImageType.PNG: + return "PNG"; + default: + return "UNKNOWN"; + } +} + +const baseImage: object = { + $type: "yandex.cloud.ai.vision.v2.Image", + imageType: 0, +}; + +export const Image = { + $type: "yandex.cloud.ai.vision.v2.Image" as const, + + encode(message: Image, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.content !== undefined) { + writer.uint32(10).bytes(message.content); + } + if (message.imageType !== 0) { + writer.uint32(16).int32(message.imageType); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Image { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseImage } as Image; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.content = reader.bytes() as Buffer; + break; + case 2: + message.imageType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Image { + const message = { ...baseImage } as Image; + message.content = + object.content !== undefined && object.content !== null + ? Buffer.from(bytesFromBase64(object.content)) + : undefined; + message.imageType = + object.imageType !== undefined && object.imageType !== null + ? image_ImageTypeFromJSON(object.imageType) + : 0; + return message; + }, + + toJSON(message: Image): unknown { + const obj: any = {}; + message.content !== undefined && + (obj.content = + message.content !== undefined + ? base64FromBytes(message.content) + : undefined); + message.imageType !== undefined && + (obj.imageType = image_ImageTypeToJSON(message.imageType)); + return obj; + }, + + fromPartial, I>>(object: I): Image { + const message = { ...baseImage } as Image; + message.content = object.content ?? 
undefined; + message.imageType = object.imageType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Image.$type, Image); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +const atob: (b64: string) => string = + globalThis.atob || + ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); +function bytesFromBase64(b64: string): Uint8Array { + const bin = atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; +} + +const btoa: (bin: string) => string = + globalThis.btoa || + ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); +function base64FromBytes(arr: Uint8Array): string { + const bin: string[] = []; + for (const byte of arr) { + bin.push(String.fromCharCode(byte)); + } + return btoa(bin.join("")); +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts b/src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts new file mode 100644 index 00000000..09d0627f --- /dev/null +++ b/src/generated/yandex/cloud/ai/vision/v2/image_classifier.ts @@ -0,0 +1,541 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Image } from "../../../../../yandex/cloud/ai/vision/v2/image"; + +export const protobufPackage = "yandex.cloud.ai.vision.v2"; + +/** Description of single label */ +export interface Label { + $type: "yandex.cloud.ai.vision.v2.Label"; + /** Label name */ + name: string; + /** human readable description of label */ + description: string; +} + +/** Image annotation for specific label */ +export interface ClassAnnotation { + $type: "yandex.cloud.ai.vision.v2.ClassAnnotation"; + /** list of annotated labels */ + label?: Label; + /** confidence for each label */ + confidence: number; +} + +/** Specification of model used for annotation */ +export interface ClassifierSpecification { + $type: "yandex.cloud.ai.vision.v2.ClassifierSpecification"; + /** List of labels, annotated by service */ + labels: Label[]; + /** type of annotation: exclusive (multi-class) or non-exclusive (multi-label) */ + classificationType: ClassifierSpecification_ClassificationType; +} + +export enum ClassifierSpecification_ClassificationType { + CLASSIFICATION_TYPE_UNSPECIFIED = 0, + MULTI_LABEL = 1, + MULTI_CLASS = 2, + UNRECOGNIZED = -1, +} + +export function classifierSpecification_ClassificationTypeFromJSON( + object: any +): ClassifierSpecification_ClassificationType { + switch (object) { + case 0: + case "CLASSIFICATION_TYPE_UNSPECIFIED": + return 
ClassifierSpecification_ClassificationType.CLASSIFICATION_TYPE_UNSPECIFIED; + case 1: + case "MULTI_LABEL": + return ClassifierSpecification_ClassificationType.MULTI_LABEL; + case 2: + case "MULTI_CLASS": + return ClassifierSpecification_ClassificationType.MULTI_CLASS; + case -1: + case "UNRECOGNIZED": + default: + return ClassifierSpecification_ClassificationType.UNRECOGNIZED; + } +} + +export function classifierSpecification_ClassificationTypeToJSON( + object: ClassifierSpecification_ClassificationType +): string { + switch (object) { + case ClassifierSpecification_ClassificationType.CLASSIFICATION_TYPE_UNSPECIFIED: + return "CLASSIFICATION_TYPE_UNSPECIFIED"; + case ClassifierSpecification_ClassificationType.MULTI_LABEL: + return "MULTI_LABEL"; + case ClassifierSpecification_ClassificationType.MULTI_CLASS: + return "MULTI_CLASS"; + default: + return "UNKNOWN"; + } +} + +/** */ +export interface AnnotationResponse { + $type: "yandex.cloud.ai.vision.v2.AnnotationResponse"; + /** internal service requestId */ + requestId: string; + /** class specification */ + classifierSpecification?: ClassifierSpecification; + /** annotations for each class */ + annotations: ClassAnnotation[]; +} + +/** request for annotation */ +export interface AnnotationRequest { + $type: "yandex.cloud.ai.vision.v2.AnnotationRequest"; + /** image to annotate */ + image?: Image; +} + +const baseLabel: object = { + $type: "yandex.cloud.ai.vision.v2.Label", + name: "", + description: "", +}; + +export const Label = { + $type: "yandex.cloud.ai.vision.v2.Label" as const, + + encode(message: Label, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.description !== "") { + writer.uint32(18).string(message.description); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Label { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLabel } as Label; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Label { + const message = { ...baseLabel } as Label; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + return message; + }, + + toJSON(message: Label): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + return obj; + }, + + fromPartial, I>>(object: I): Label { + const message = { ...baseLabel } as Label; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Label.$type, Label); + +const baseClassAnnotation: object = { + $type: "yandex.cloud.ai.vision.v2.ClassAnnotation", + confidence: 0, +}; + +export const ClassAnnotation = { + $type: "yandex.cloud.ai.vision.v2.ClassAnnotation" as const, + + encode( + message: ClassAnnotation, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.label !== undefined) { + Label.encode(message.label, writer.uint32(10).fork()).ldelim(); + } + if (message.confidence !== 0) { + writer.uint32(17).double(message.confidence); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClassAnnotation { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseClassAnnotation } as ClassAnnotation; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.label = Label.decode(reader, reader.uint32()); + break; + case 2: + message.confidence = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClassAnnotation { + const message = { ...baseClassAnnotation } as ClassAnnotation; + message.label = + object.label !== undefined && object.label !== null + ? Label.fromJSON(object.label) + : undefined; + message.confidence = + object.confidence !== undefined && object.confidence !== null + ? Number(object.confidence) + : 0; + return message; + }, + + toJSON(message: ClassAnnotation): unknown { + const obj: any = {}; + message.label !== undefined && + (obj.label = message.label ? Label.toJSON(message.label) : undefined); + message.confidence !== undefined && (obj.confidence = message.confidence); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClassAnnotation { + const message = { ...baseClassAnnotation } as ClassAnnotation; + message.label = + object.label !== undefined && object.label !== null + ? Label.fromPartial(object.label) + : undefined; + message.confidence = object.confidence ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(ClassAnnotation.$type, ClassAnnotation); + +const baseClassifierSpecification: object = { + $type: "yandex.cloud.ai.vision.v2.ClassifierSpecification", + classificationType: 0, +}; + +export const ClassifierSpecification = { + $type: "yandex.cloud.ai.vision.v2.ClassifierSpecification" as const, + + encode( + message: ClassifierSpecification, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.labels) { + Label.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.classificationType !== 0) { + writer.uint32(16).int32(message.classificationType); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ClassifierSpecification { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseClassifierSpecification, + } as ClassifierSpecification; + message.labels = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.labels.push(Label.decode(reader, reader.uint32())); + break; + case 2: + message.classificationType = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClassifierSpecification { + const message = { + ...baseClassifierSpecification, + } as ClassifierSpecification; + message.labels = (object.labels ?? []).map((e: any) => Label.fromJSON(e)); + message.classificationType = + object.classificationType !== undefined && + object.classificationType !== null + ? classifierSpecification_ClassificationTypeFromJSON( + object.classificationType + ) + : 0; + return message; + }, + + toJSON(message: ClassifierSpecification): unknown { + const obj: any = {}; + if (message.labels) { + obj.labels = message.labels.map((e) => (e ? 
Label.toJSON(e) : undefined)); + } else { + obj.labels = []; + } + message.classificationType !== undefined && + (obj.classificationType = + classifierSpecification_ClassificationTypeToJSON( + message.classificationType + )); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClassifierSpecification { + const message = { + ...baseClassifierSpecification, + } as ClassifierSpecification; + message.labels = object.labels?.map((e) => Label.fromPartial(e)) || []; + message.classificationType = object.classificationType ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ClassifierSpecification.$type, ClassifierSpecification); + +const baseAnnotationResponse: object = { + $type: "yandex.cloud.ai.vision.v2.AnnotationResponse", + requestId: "", +}; + +export const AnnotationResponse = { + $type: "yandex.cloud.ai.vision.v2.AnnotationResponse" as const, + + encode( + message: AnnotationResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.requestId !== "") { + writer.uint32(10).string(message.requestId); + } + if (message.classifierSpecification !== undefined) { + ClassifierSpecification.encode( + message.classifierSpecification, + writer.uint32(18).fork() + ).ldelim(); + } + for (const v of message.annotations) { + ClassAnnotation.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AnnotationResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseAnnotationResponse } as AnnotationResponse; + message.annotations = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.requestId = reader.string(); + break; + case 2: + message.classifierSpecification = ClassifierSpecification.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.annotations.push( + ClassAnnotation.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AnnotationResponse { + const message = { ...baseAnnotationResponse } as AnnotationResponse; + message.requestId = + object.requestId !== undefined && object.requestId !== null + ? String(object.requestId) + : ""; + message.classifierSpecification = + object.classifierSpecification !== undefined && + object.classifierSpecification !== null + ? ClassifierSpecification.fromJSON(object.classifierSpecification) + : undefined; + message.annotations = (object.annotations ?? []).map((e: any) => + ClassAnnotation.fromJSON(e) + ); + return message; + }, + + toJSON(message: AnnotationResponse): unknown { + const obj: any = {}; + message.requestId !== undefined && (obj.requestId = message.requestId); + message.classifierSpecification !== undefined && + (obj.classifierSpecification = message.classifierSpecification + ? ClassifierSpecification.toJSON(message.classifierSpecification) + : undefined); + if (message.annotations) { + obj.annotations = message.annotations.map((e) => + e ? ClassAnnotation.toJSON(e) : undefined + ); + } else { + obj.annotations = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): AnnotationResponse { + const message = { ...baseAnnotationResponse } as AnnotationResponse; + message.requestId = object.requestId ?? ""; + message.classifierSpecification = + object.classifierSpecification !== undefined && + object.classifierSpecification !== null + ? 
ClassifierSpecification.fromPartial(object.classifierSpecification) + : undefined; + message.annotations = + object.annotations?.map((e) => ClassAnnotation.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(AnnotationResponse.$type, AnnotationResponse); + +const baseAnnotationRequest: object = { + $type: "yandex.cloud.ai.vision.v2.AnnotationRequest", +}; + +export const AnnotationRequest = { + $type: "yandex.cloud.ai.vision.v2.AnnotationRequest" as const, + + encode( + message: AnnotationRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.image !== undefined) { + Image.encode(message.image, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AnnotationRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAnnotationRequest } as AnnotationRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.image = Image.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AnnotationRequest { + const message = { ...baseAnnotationRequest } as AnnotationRequest; + message.image = + object.image !== undefined && object.image !== null + ? Image.fromJSON(object.image) + : undefined; + return message; + }, + + toJSON(message: AnnotationRequest): unknown { + const obj: any = {}; + message.image !== undefined && + (obj.image = message.image ? Image.toJSON(message.image) : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): AnnotationRequest { + const message = { ...baseAnnotationRequest } as AnnotationRequest; + message.image = + object.image !== undefined && object.image !== null + ? 
Image.fromPartial(object.image) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(AnnotationRequest.$type, AnnotationRequest); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts b/src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts new file mode 100644 index 00000000..1d9f8dae --- /dev/null +++ b/src/generated/yandex/cloud/ai/vision/v2/image_classifier_service.ts @@ -0,0 +1,75 @@ +/* eslint-disable */ +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + AnnotationRequest, + AnnotationResponse, +} from "../../../../../yandex/cloud/ai/vision/v2/image_classifier"; + +export const protobufPackage = "yandex.cloud.ai.vision.v2"; + +export const ImageClassifierServiceService = { + annotate: { + path: "/yandex.cloud.ai.vision.v2.ImageClassifierService/Annotate", + requestStream: false, + responseStream: false, + requestSerialize: (value: AnnotationRequest) => + Buffer.from(AnnotationRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => AnnotationRequest.decode(value), + responseSerialize: (value: AnnotationResponse) => + Buffer.from(AnnotationResponse.encode(value).finish()), + 
responseDeserialize: (value: Buffer) => AnnotationResponse.decode(value), + }, +} as const; + +export interface ImageClassifierServiceServer + extends UntypedServiceImplementation { + annotate: handleUnaryCall; +} + +export interface ImageClassifierServiceClient extends Client { + annotate( + request: AnnotationRequest, + callback: (error: ServiceError | null, response: AnnotationResponse) => void + ): ClientUnaryCall; + annotate( + request: AnnotationRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: AnnotationResponse) => void + ): ClientUnaryCall; + annotate( + request: AnnotationRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: AnnotationResponse) => void + ): ClientUnaryCall; +} + +export const ImageClassifierServiceClient = makeGenericClientConstructor( + ImageClassifierServiceService, + "yandex.cloud.ai.vision.v2.ImageClassifierService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ImageClassifierServiceClient; + service: typeof ImageClassifierServiceService; +}; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts index 4803e7bd..43d3bc79 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group.ts @@ -37,16 +37,16 @@ export enum LoadBalancingMode { */ LEAST_REQUEST = 2, /** - * MAGLEV_HASH - Maglev hashing load balancing mode, used only if session affinity is working for the backend group. + * MAGLEV_HASH - Maglev hashing load balancing mode. * * Each endpoint is hashed, and a hash table with 65537 rows is filled accordingly, so that every endpoint occupies - * the same amount of rows. 
An attribute of each request, specified in session affinity configuration of the backend - * group, is also hashed by the same function. The row with the same number as the resulting value is looked up in the - * table to determine the endpoint that receives the request. + * the same amount of rows. An attribute of each request is also hashed by the same function (if session affinity is + * enabled for the backend group, the attribute to hash is specified in session affinity configuration). The row + * with the same number as the resulting value is looked up in the table to determine the endpoint that receives + * the request. * - * If session affinity is not working for the backend group (i.e. it is not configured or the group contains more - * than one backend with positive weight), endpoints for backends with `MAGLEV_HASH` load balancing mode are picked at - * `RANDOM` instead. + * If the backend group with session affinity enabled contains more than one backend with positive weight, endpoints + * for backends with `MAGLEV_HASH` load balancing mode are picked at `RANDOM` instead. */ MAGLEV_HASH = 3, UNRECOGNIZED = -1, @@ -112,7 +112,7 @@ export interface BackendGroup { http?: HttpBackendGroup | undefined; /** List of gRPC backends that the backend group consists of. */ grpc?: GrpcBackendGroup | undefined; - /** List of stream backends that the backend group consist of. */ + /** List of stream (TCP) backends that the backend group consists of. */ stream?: StreamBackendGroup | undefined; /** Creation timestamp. */ createdAt?: Date; @@ -124,10 +124,16 @@ export interface BackendGroup_LabelsEntry { value: string; } -/** A Stream backend group resource. */ +/** A stream (TCP) backend group resource. */ export interface StreamBackendGroup { $type: "yandex.cloud.apploadbalancer.v1.StreamBackendGroup"; + /** List of stream (TCP) backends. */ backends: StreamBackend[]; + /** + * Connection-based session affinity configuration. 
+ * + * For now, a connection is defined only by an IP address of the client. + */ connection?: ConnectionSessionAffinity | undefined; } @@ -178,10 +184,12 @@ export interface CookieSessionAffinity { /** Name of the cookie that is used for session affinity. */ name: string; /** - * Maximum age of cookies that are generated for sessions (persistent cookies). + * Maximum age of cookies that are generated for sessions. * - * If not set, session cookies are used, which are stored by clients in temporary memory and are deleted + * If set to `0`, session cookies are used, which are stored by clients in temporary memory and are deleted * on client restarts. + * + * If not set, the balancer does not generate cookies and only uses incoming ones for establishing session affinity. */ ttl?: Duration; } @@ -211,7 +219,8 @@ export interface LoadBalancingConfig { panicThreshold: number; /** * Percentage of traffic that a load balancer node sends to healthy backends in its availability zone. - * The rest is divided equally between other zones. For details about zone-aware routing, see [documentation](/docs/application-load-balancer/concepts/backend-group#locality). + * The rest is divided equally between other zones. For details about zone-aware routing, see + * [documentation](/docs/application-load-balancer/concepts/backend-group#locality). * * If there are no healthy backends in an availability zone, all the traffic is divided between other zones. * @@ -237,24 +246,52 @@ export interface LoadBalancingConfig { /** * Load balancing mode for the backend. * - * For detals about load balancing modes, see + * For details about load balancing modes, see * [documentation](/docs/application-load-balancer/concepts/backend-group#balancing-mode). */ mode: LoadBalancingMode; } -/** A stream backend resource. */ +/** A stream (TCP) backend resource. */ export interface StreamBackend { $type: "yandex.cloud.apploadbalancer.v1.StreamBackend"; + /** Name of the backend. 
*/ name: string; - /** If not set, backend will be disabled. */ + /** + * Backend weight. Traffic is distributed between backends of a backend group according to their weights. + * + * Weights must be set either for all backends in a group or for none of them. + * Setting no weights is the same as setting equal non-zero weights for all backends. + * + * If the weight is non-positive, traffic is not sent to the backend. + */ backendWeight?: number; + /** Load balancing configuration for the backend. */ loadBalancingConfig?: LoadBalancingConfig; - /** Optional alternative port for all targets. */ + /** Port used by all targets to receive traffic. */ port: number; + /** + * Target groups that belong to the backend. For details about target groups, see + * [documentation](/docs/application-load-balancer/concepts/target-group). + */ targetGroups?: TargetGroupsBackend | undefined; + /** + * Health checks to perform on targets from target groups. + * For details about health checking, see [documentation](/docs/application-load-balancer/concepts/backend-group#health-checks). + * + * If no health checks are specified, active health checking is not performed. + */ healthchecks: HealthCheck[]; + /** + * Settings for TLS connections between load balancer nodes and backend targets. + * + * If specified, the load balancer establishes TLS-encrypted TCP connections with targets and compares received + * certificates with the one specified in [BackendTls.validation_context]. + * If not specified, the load balancer establishes unencrypted TCP connections with targets. + */ tls?: BackendTls; + /** If set, proxy protocol will be enabled for this backend. */ + enableProxyProtocol: boolean; } /** An HTTP backend resource. 
*/ @@ -359,6 +396,20 @@ export interface TargetGroupsBackend { targetGroupIds: string[]; } +/** Transport settings to be used instead of the settings configured per-cluster */ +export interface PlaintextTransportSettings { + $type: "yandex.cloud.apploadbalancer.v1.PlaintextTransportSettings"; +} + +/** Transport settings to be used instead of the settings configured per-cluster */ +export interface SecureTransportSettings { + $type: "yandex.cloud.apploadbalancer.v1.SecureTransportSettings"; + /** SNI string for TLS connections. */ + sni: string; + /** Validation context for backend TLS connections. */ + validationContext?: ValidationContext; +} + /** A resource for backend TLS settings. */ export interface BackendTls { $type: "yandex.cloud.apploadbalancer.v1.BackendTls"; @@ -427,6 +478,8 @@ export interface HealthCheck { http?: HealthCheck_HttpHealthCheck | undefined; /** gRPC health check settings. */ grpc?: HealthCheck_GrpcHealthCheck | undefined; + plaintext?: PlaintextTransportSettings | undefined; + tls?: SecureTransportSettings | undefined; } /** A resource for TCP stream health check settings. */ @@ -1484,6 +1537,7 @@ const baseStreamBackend: object = { $type: "yandex.cloud.apploadbalancer.v1.StreamBackend", name: "", port: 0, + enableProxyProtocol: false, }; export const StreamBackend = { @@ -1523,6 +1577,9 @@ export const StreamBackend = { if (message.tls !== undefined) { BackendTls.encode(message.tls, writer.uint32(58).fork()).ldelim(); } + if (message.enableProxyProtocol === true) { + writer.uint32(64).bool(message.enableProxyProtocol); + } return writer; }, @@ -1566,6 +1623,9 @@ export const StreamBackend = { case 7: message.tls = BackendTls.decode(reader, reader.uint32()); break; + case 8: + message.enableProxyProtocol = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1604,6 +1664,11 @@ export const StreamBackend = { object.tls !== undefined && object.tls !== null ? 
BackendTls.fromJSON(object.tls) : undefined; + message.enableProxyProtocol = + object.enableProxyProtocol !== undefined && + object.enableProxyProtocol !== null + ? Boolean(object.enableProxyProtocol) + : false; return message; }, @@ -1630,6 +1695,8 @@ export const StreamBackend = { } message.tls !== undefined && (obj.tls = message.tls ? BackendTls.toJSON(message.tls) : undefined); + message.enableProxyProtocol !== undefined && + (obj.enableProxyProtocol = message.enableProxyProtocol); return obj; }, @@ -1655,6 +1722,7 @@ export const StreamBackend = { object.tls !== undefined && object.tls !== null ? BackendTls.fromPartial(object.tls) : undefined; + message.enableProxyProtocol = object.enableProxyProtocol ?? false; return message; }, }; @@ -2126,6 +2194,162 @@ export const TargetGroupsBackend = { messageTypeRegistry.set(TargetGroupsBackend.$type, TargetGroupsBackend); +const basePlaintextTransportSettings: object = { + $type: "yandex.cloud.apploadbalancer.v1.PlaintextTransportSettings", +}; + +export const PlaintextTransportSettings = { + $type: "yandex.cloud.apploadbalancer.v1.PlaintextTransportSettings" as const, + + encode( + _: PlaintextTransportSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PlaintextTransportSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...basePlaintextTransportSettings, + } as PlaintextTransportSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(_: any): PlaintextTransportSettings { + const message = { + ...basePlaintextTransportSettings, + } as PlaintextTransportSettings; + return message; + }, + + toJSON(_: PlaintextTransportSettings): unknown { + const obj: any = {}; + return obj; + }, + + fromPartial, I>>( + _: I + ): PlaintextTransportSettings { + const message = { + ...basePlaintextTransportSettings, + } as PlaintextTransportSettings; + return message; + }, +}; + +messageTypeRegistry.set( + PlaintextTransportSettings.$type, + PlaintextTransportSettings +); + +const baseSecureTransportSettings: object = { + $type: "yandex.cloud.apploadbalancer.v1.SecureTransportSettings", + sni: "", +}; + +export const SecureTransportSettings = { + $type: "yandex.cloud.apploadbalancer.v1.SecureTransportSettings" as const, + + encode( + message: SecureTransportSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.sni !== "") { + writer.uint32(10).string(message.sni); + } + if (message.validationContext !== undefined) { + ValidationContext.encode( + message.validationContext, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): SecureTransportSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseSecureTransportSettings, + } as SecureTransportSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.sni = reader.string(); + break; + case 3: + message.validationContext = ValidationContext.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): SecureTransportSettings { + const message = { + ...baseSecureTransportSettings, + } as SecureTransportSettings; + message.sni = + object.sni !== undefined && object.sni !== null ? String(object.sni) : ""; + message.validationContext = + object.validationContext !== undefined && + object.validationContext !== null + ? ValidationContext.fromJSON(object.validationContext) + : undefined; + return message; + }, + + toJSON(message: SecureTransportSettings): unknown { + const obj: any = {}; + message.sni !== undefined && (obj.sni = message.sni); + message.validationContext !== undefined && + (obj.validationContext = message.validationContext + ? ValidationContext.toJSON(message.validationContext) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): SecureTransportSettings { + const message = { + ...baseSecureTransportSettings, + } as SecureTransportSettings; + message.sni = object.sni ?? ""; + message.validationContext = + object.validationContext !== undefined && + object.validationContext !== null + ? 
ValidationContext.fromPartial(object.validationContext) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(SecureTransportSettings.$type, SecureTransportSettings); + const baseBackendTls: object = { $type: "yandex.cloud.apploadbalancer.v1.BackendTls", sni: "", @@ -2328,6 +2552,18 @@ export const HealthCheck = { writer.uint32(74).fork() ).ldelim(); } + if (message.plaintext !== undefined) { + PlaintextTransportSettings.encode( + message.plaintext, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.tls !== undefined) { + SecureTransportSettings.encode( + message.tls, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -2374,6 +2610,15 @@ export const HealthCheck = { reader.uint32() ); break; + case 10: + message.plaintext = PlaintextTransportSettings.decode( + reader, + reader.uint32() + ); + break; + case 11: + message.tls = SecureTransportSettings.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -2422,6 +2667,14 @@ export const HealthCheck = { object.grpc !== undefined && object.grpc !== null ? HealthCheck_GrpcHealthCheck.fromJSON(object.grpc) : undefined; + message.plaintext = + object.plaintext !== undefined && object.plaintext !== null + ? PlaintextTransportSettings.fromJSON(object.plaintext) + : undefined; + message.tls = + object.tls !== undefined && object.tls !== null + ? SecureTransportSettings.fromJSON(object.tls) + : undefined; return message; }, @@ -2455,6 +2708,14 @@ export const HealthCheck = { (obj.grpc = message.grpc ? HealthCheck_GrpcHealthCheck.toJSON(message.grpc) : undefined); + message.plaintext !== undefined && + (obj.plaintext = message.plaintext + ? PlaintextTransportSettings.toJSON(message.plaintext) + : undefined); + message.tls !== undefined && + (obj.tls = message.tls + ? SecureTransportSettings.toJSON(message.tls) + : undefined); return obj; }, @@ -2486,6 +2747,14 @@ export const HealthCheck = { object.grpc !== undefined && object.grpc !== null ? 
HealthCheck_GrpcHealthCheck.fromPartial(object.grpc) : undefined; + message.plaintext = + object.plaintext !== undefined && object.plaintext !== null + ? PlaintextTransportSettings.fromPartial(object.plaintext) + : undefined; + message.tls = + object.tls !== undefined && object.tls !== null + ? SecureTransportSettings.fromPartial(object.tls) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts index da092846..a7add978 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/backend_group_service.ts @@ -19,6 +19,7 @@ import { BackendGroup, HttpBackendGroup, GrpcBackendGroup, + StreamBackendGroup, HttpBackend, GrpcBackend, StreamBackend, @@ -131,6 +132,8 @@ export interface UpdateBackendGroupRequest { http?: HttpBackendGroup | undefined; /** New list of gRPC backends that the backend group will consist of. */ grpc?: GrpcBackendGroup | undefined; + /** New list of stream (TCP) backends that the backend group will consist of. */ + stream?: StreamBackendGroup | undefined; } export interface UpdateBackendGroupRequest_LabelsEntry { @@ -169,6 +172,8 @@ export interface CreateBackendGroupRequest { http?: HttpBackendGroup | undefined; /** List of gRPC backends that the backend group consists of. */ grpc?: GrpcBackendGroup | undefined; + /** List of stream (TCP) backends that the backend group consists of. */ + stream?: StreamBackendGroup | undefined; } export interface CreateBackendGroupRequest_LabelsEntry { @@ -217,7 +222,7 @@ export interface UpdateBackendRequest { http?: HttpBackend | undefined; /** New settings for the gRPC backend. */ grpc?: GrpcBackend | undefined; - /** New settings for the Stream backend. */ + /** New settings for the stream (TCP) backend. 
*/ stream?: StreamBackend | undefined; } @@ -758,6 +763,12 @@ export const UpdateBackendGroupRequest = { if (message.grpc !== undefined) { GrpcBackendGroup.encode(message.grpc, writer.uint32(58).fork()).ldelim(); } + if (message.stream !== undefined) { + StreamBackendGroup.encode( + message.stream, + writer.uint32(66).fork() + ).ldelim(); + } return writer; }, @@ -801,6 +812,9 @@ export const UpdateBackendGroupRequest = { case 7: message.grpc = GrpcBackendGroup.decode(reader, reader.uint32()); break; + case 8: + message.stream = StreamBackendGroup.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -843,6 +857,10 @@ export const UpdateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromJSON(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? StreamBackendGroup.fromJSON(object.stream) + : undefined; return message; }, @@ -871,6 +889,10 @@ export const UpdateBackendGroupRequest = { (obj.grpc = message.grpc ? GrpcBackendGroup.toJSON(message.grpc) : undefined); + message.stream !== undefined && + (obj.stream = message.stream + ? StreamBackendGroup.toJSON(message.stream) + : undefined); return obj; }, @@ -903,6 +925,10 @@ export const UpdateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromPartial(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? 
StreamBackendGroup.fromPartial(object.stream) + : undefined; return message; }, }; @@ -1114,6 +1140,12 @@ export const CreateBackendGroupRequest = { if (message.grpc !== undefined) { GrpcBackendGroup.encode(message.grpc, writer.uint32(50).fork()).ldelim(); } + if (message.stream !== undefined) { + StreamBackendGroup.encode( + message.stream, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -1154,6 +1186,9 @@ export const CreateBackendGroupRequest = { case 6: message.grpc = GrpcBackendGroup.decode(reader, reader.uint32()); break; + case 7: + message.stream = StreamBackendGroup.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1192,6 +1227,10 @@ export const CreateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromJSON(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? StreamBackendGroup.fromJSON(object.stream) + : undefined; return message; }, @@ -1215,6 +1254,10 @@ export const CreateBackendGroupRequest = { (obj.grpc = message.grpc ? GrpcBackendGroup.toJSON(message.grpc) : undefined); + message.stream !== undefined && + (obj.stream = message.stream + ? StreamBackendGroup.toJSON(message.stream) + : undefined); return obj; }, @@ -1243,6 +1286,10 @@ export const CreateBackendGroupRequest = { object.grpc !== undefined && object.grpc !== null ? GrpcBackendGroup.fromPartial(object.grpc) : undefined; + message.stream = + object.stream !== undefined && object.stream !== null + ? 
StreamBackendGroup.fromPartial(object.stream) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts index 09180897..7811394d 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer.ts @@ -227,11 +227,17 @@ export interface Listener { * Endpoints are defined by their IP addresses and ports. */ endpoints: Endpoint[]; - /** HTTP listener settings. */ + /** Unencrypted HTTP listener settings. */ http?: HttpListener | undefined; - /** HTTPS (HTTP over TLS) listener settings. */ + /** + * TLS-encrypted HTTP or TCP stream listener settings. + * + * All handlers within a listener ([TlsListener.default_handler] and [TlsListener.sni_handlers]) must be of one + * type, [HttpHandler] or [StreamHandler]. Mixing HTTP and TCP stream traffic in a TLS-encrypted listener is not + * supported. + */ tls?: TlsListener | undefined; - /** Stream listener settings. */ + /** Unencrypted stream (TCP) listener settings. */ stream?: StreamListener | undefined; } @@ -261,7 +267,7 @@ export interface HttpListener { redirects?: Redirects; } -/** An HTTPS (HTTP over TLS) listener resource. */ +/** TLS-encrypted (HTTP or TCP stream) listener resource. */ export interface TlsListener { $type: "yandex.cloud.apploadbalancer.v1.TlsListener"; /** @@ -276,9 +282,10 @@ export interface TlsListener { sniHandlers: SniMatch[]; } -/** A Stream listener resource. */ +/** A stream (TCP) listener resource. */ export interface StreamListener { $type: "yandex.cloud.apploadbalancer.v1.StreamListener"; + /** Settings for handling stream (TCP) requests. */ handler?: StreamHandler; } @@ -289,9 +296,17 @@ export interface Http2Options { maxConcurrentStreams: number; } -/** A stream handler resource. */ +/** A stream (TCP) handler resource. 
*/ export interface StreamHandler { $type: "yandex.cloud.apploadbalancer.v1.StreamHandler"; + /** + * ID of the backend group processing requests. For details about the concept, see + * [documentation](/docs/application-load-balancer/concepts/backend-group). + * + * The backend group type, specified via [BackendGroup.backend], must be `stream`. + * + * To get the list of all available backend groups, make a [BackendGroupService.List] request. + */ backendGroupId: string; } @@ -299,9 +314,10 @@ export interface StreamHandler { export interface HttpHandler { $type: "yandex.cloud.apploadbalancer.v1.HttpHandler"; /** - * ID of the HTTP router processing requests. + * ID of the HTTP router processing requests. For details about the concept, see + * [documentation](/docs/application-load-balancer/concepts/http-router). * - * For details about the concept, see [documentation](/docs/application-load-balancer/concepts/http-router). + * To get the list of all available HTTP routers, make a [HttpRouterService.List] request. */ httpRouterId: string; /** @@ -337,12 +353,12 @@ export interface SniMatch { handler?: TlsHandler; } -/** An HTTPS (HTTP over TLS) handler resource. */ +/** A TLS-encrypted (HTTP or TCP stream) handler resource. */ export interface TlsHandler { $type: "yandex.cloud.apploadbalancer.v1.TlsHandler"; /** HTTP handler. */ httpHandler?: HttpHandler | undefined; - /** Stream handler */ + /** Stream (TCP) handler. */ streamHandler?: StreamHandler | undefined; /** * ID's of the TLS server certificates from [Certificate Manager](/docs/certificate-manager/). 
diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts index 36446b17..2c45bc1c 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/load_balancer_service.ts @@ -380,11 +380,17 @@ export interface ListenerSpec { * Endpoints are defined by their IP addresses and ports. */ endpointSpecs: EndpointSpec[]; - /** HTTP listener settings. */ + /** Unencrypted HTTP listener settings. */ http?: HttpListener | undefined; - /** TLS listener settings. */ + /** + * TLS-encrypted HTTP or TCP stream listener settings. + * + * All handlers within a listener ([TlsListener.default_handler] and [TlsListener.sni_handlers]) must be of one + * type, [HttpHandler] or [StreamHandler]. Mixing HTTP and TCP stream traffic in a TLS-encrypted listener is not + * supported. + */ tls?: TlsListener | undefined; - /** TCP listener settings. */ + /** Unencrypted stream (TCP) listener settings. */ stream?: StreamListener | undefined; } diff --git a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts index ee8c69d0..685be7ae 100644 --- a/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts +++ b/src/generated/yandex/cloud/apploadbalancer/v1/virtual_host.ts @@ -358,7 +358,11 @@ export function grpcStatusResponseAction_StatusToJSON( /** An HTTP route action resource. */ export interface HttpRouteAction { $type: "yandex.cloud.apploadbalancer.v1.HttpRouteAction"; - /** Backend group to forward requests to. */ + /** + * Backend group to forward requests to. + * + * Stream (TCP) backend groups are not supported. 
+ */ backendGroupId: string; /** * Overall timeout for an HTTP connection between a load balancer node an a backend from the backend group: diff --git a/src/generated/yandex/cloud/cdn/index.ts b/src/generated/yandex/cloud/cdn/index.ts index e82aaf81..b5ae2427 100644 --- a/src/generated/yandex/cloud/cdn/index.ts +++ b/src/generated/yandex/cloud/cdn/index.ts @@ -4,5 +4,7 @@ export * as origin_group from './v1/origin_group' export * as origin_group_service from './v1/origin_group_service' export * as origin_service from './v1/origin_service' export * as provider_service from './v1/provider_service' +export * as raw_logs from './v1/raw_logs' +export * as raw_logs_service from './v1/raw_logs_service' export * as resource from './v1/resource' export * as resource_service from './v1/resource_service' \ No newline at end of file diff --git a/src/generated/yandex/cloud/cdn/v1/cache_service.ts b/src/generated/yandex/cloud/cdn/v1/cache_service.ts index f37bc77a..b2e9d335 100644 --- a/src/generated/yandex/cloud/cdn/v1/cache_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/cache_service.ts @@ -339,7 +339,11 @@ messageTypeRegistry.set(PrefetchCacheMetadata.$type, PrefetchCacheMetadata); /** A set of methods for managing Cache Service resources. */ export const CacheServiceService = { - /** Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). */ + /** + * Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). + * + * Purging may take up to 15 minutes. + */ purge: { path: "/yandex.cloud.cdn.v1.CacheService/Purge", requestStream: false, @@ -366,14 +370,22 @@ export const CacheServiceService = { } as const; export interface CacheServiceServer extends UntypedServiceImplementation { - /** Removes specified files from the cache of the specified resource. 
For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). */ + /** + * Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). + * + * Purging may take up to 15 minutes. + */ purge: handleUnaryCall; /** Uploads specified files from origins to cache of the specified resource. For defails about prefetching, see [documentation](/docs/cdn/concepts/caching#prefetch). */ prefetch: handleUnaryCall; } export interface CacheServiceClient extends Client { - /** Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). */ + /** + * Removes specified files from the cache of the specified resource. For details about purging, see [documentation](/docs/cdn/concepts/caching#purge). + * + * Purging may take up to 15 minutes. + */ purge( request: PurgeCacheRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts b/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts index a1918295..d8649684 100644 --- a/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/origin_group_service.ts @@ -1022,7 +1022,12 @@ export const OriginGroupServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates origin group. */ + /** + * Updates the specified origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin group via a [CacheService.Purge] request. 
+ */ update: { path: "/yandex.cloud.cdn.v1.OriginGroupService/Update", requestStream: false, @@ -1057,7 +1062,12 @@ export interface OriginGroupServiceServer extends UntypedServiceImplementation { list: handleUnaryCall; /** Creates origin group. */ create: handleUnaryCall; - /** Updates origin group. */ + /** + * Updates the specified origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin group via a [CacheService.Purge] request. + */ update: handleUnaryCall; /** Deletes origin group with specified origin group id. */ delete: handleUnaryCall; @@ -1121,7 +1131,12 @@ export interface OriginGroupServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates origin group. */ + /** + * Updates the specified origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin group via a [CacheService.Purge] request. + */ update( request: UpdateOriginGroupRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/cdn/v1/origin_service.ts b/src/generated/yandex/cloud/cdn/v1/origin_service.ts index d3a9e990..c0233d4b 100644 --- a/src/generated/yandex/cloud/cdn/v1/origin_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/origin_service.ts @@ -967,7 +967,12 @@ export const OriginServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates origin from origin group. */ + /** + * Updates the specified origin from the origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin via a [CacheService.Purge] request. 
+ */ update: { path: "/yandex.cloud.cdn.v1.OriginService/Update", requestStream: false, @@ -1000,7 +1005,12 @@ export interface OriginServiceServer extends UntypedServiceImplementation { list: handleUnaryCall; /** Creates origin inside origin group. */ create: handleUnaryCall; - /** Updates origin from origin group. */ + /** + * Updates the specified origin from the origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin via a [CacheService.Purge] request. + */ update: handleUnaryCall; /** Deletes origin from origin group. */ delete: handleUnaryCall; @@ -1064,7 +1074,12 @@ export interface OriginServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates origin from origin group. */ + /** + * Updates the specified origin from the origin group. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge cache of the resources that + * use the origin via a [CacheService.Purge] request. + */ update( request: UpdateOriginRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/cdn/v1/raw_logs.ts b/src/generated/yandex/cloud/cdn/v1/raw_logs.ts new file mode 100644 index 00000000..f45f2616 --- /dev/null +++ b/src/generated/yandex/cloud/cdn/v1/raw_logs.ts @@ -0,0 +1,193 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.cdn.v1"; + +/** Provider side statuses of Raw logs processing. */ +export enum RawLogsStatus { + RAW_LOGS_STATUS_UNSPECIFIED = 0, + /** RAW_LOGS_STATUS_NOT_ACTIVATED - Raw logs wasn't activated. */ + RAW_LOGS_STATUS_NOT_ACTIVATED = 1, + /** RAW_LOGS_STATUS_OK - Raw logs was activated, and logs storing process works as expected. 
RAW_LOGS_STATUS_OK = 2, + /** RAW_LOGS_STATUS_FAILED - Raw logs were activated, but the CDN provider failed to store logs. */ + RAW_LOGS_STATUS_FAILED = 3, + UNRECOGNIZED = -1, +} + +export function rawLogsStatusFromJSON(object: any): RawLogsStatus { + switch (object) { + case 0: + case "RAW_LOGS_STATUS_UNSPECIFIED": + return RawLogsStatus.RAW_LOGS_STATUS_UNSPECIFIED; + case 1: + case "RAW_LOGS_STATUS_NOT_ACTIVATED": + return RawLogsStatus.RAW_LOGS_STATUS_NOT_ACTIVATED; + case 2: + case "RAW_LOGS_STATUS_OK": + return RawLogsStatus.RAW_LOGS_STATUS_OK; + case 3: + case "RAW_LOGS_STATUS_FAILED": + return RawLogsStatus.RAW_LOGS_STATUS_FAILED; + case -1: + case "UNRECOGNIZED": + default: + return RawLogsStatus.UNRECOGNIZED; + } +} + +export function rawLogsStatusToJSON(object: RawLogsStatus): string { + switch (object) { + case RawLogsStatus.RAW_LOGS_STATUS_UNSPECIFIED: + return "RAW_LOGS_STATUS_UNSPECIFIED"; + case RawLogsStatus.RAW_LOGS_STATUS_NOT_ACTIVATED: + return "RAW_LOGS_STATUS_NOT_ACTIVATED"; + case RawLogsStatus.RAW_LOGS_STATUS_OK: + return "RAW_LOGS_STATUS_OK"; + case RawLogsStatus.RAW_LOGS_STATUS_FAILED: + return "RAW_LOGS_STATUS_FAILED"; + default: + return "UNKNOWN"; + } +} + +/** User settings for Raw logs. */ +export interface RawLogsSettings { + $type: "yandex.cloud.cdn.v1.RawLogsSettings"; + /** Destination S3 bucket name; note that the user should be the owner of the bucket. */ + bucketName: string; + /** Bucket region, unused for now, could be blank. */ + bucketRegion: string; + /** + * file_prefix: prefix each log object name with specified prefix. + * + * The prefix makes it simpler for you to locate the log objects. + * For example, if you specify the prefix value logs/, each log object that + * S3 creates begins with the logs/ prefix in its key, so pseudo S3 folders + * can be set up. 
+ */ + filePrefix: string; +} + +const baseRawLogsSettings: object = { + $type: "yandex.cloud.cdn.v1.RawLogsSettings", + bucketName: "", + bucketRegion: "", + filePrefix: "", +}; + +export const RawLogsSettings = { + $type: "yandex.cloud.cdn.v1.RawLogsSettings" as const, + + encode( + message: RawLogsSettings, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.bucketName !== "") { + writer.uint32(10).string(message.bucketName); + } + if (message.bucketRegion !== "") { + writer.uint32(18).string(message.bucketRegion); + } + if (message.filePrefix !== "") { + writer.uint32(26).string(message.filePrefix); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RawLogsSettings { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRawLogsSettings } as RawLogsSettings; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.bucketName = reader.string(); + break; + case 2: + message.bucketRegion = reader.string(); + break; + case 3: + message.filePrefix = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RawLogsSettings { + const message = { ...baseRawLogsSettings } as RawLogsSettings; + message.bucketName = + object.bucketName !== undefined && object.bucketName !== null + ? String(object.bucketName) + : ""; + message.bucketRegion = + object.bucketRegion !== undefined && object.bucketRegion !== null + ? String(object.bucketRegion) + : ""; + message.filePrefix = + object.filePrefix !== undefined && object.filePrefix !== null + ? 
String(object.filePrefix) + : ""; + return message; + }, + + toJSON(message: RawLogsSettings): unknown { + const obj: any = {}; + message.bucketName !== undefined && (obj.bucketName = message.bucketName); + message.bucketRegion !== undefined && + (obj.bucketRegion = message.bucketRegion); + message.filePrefix !== undefined && (obj.filePrefix = message.filePrefix); + return obj; + }, + + fromPartial, I>>( + object: I + ): RawLogsSettings { + const message = { ...baseRawLogsSettings } as RawLogsSettings; + message.bucketName = object.bucketName ?? ""; + message.bucketRegion = object.bucketRegion ?? ""; + message.filePrefix = object.filePrefix ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RawLogsSettings.$type, RawLogsSettings); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts b/src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts new file mode 100644 index 00000000..76b7dc9a --- /dev/null +++ b/src/generated/yandex/cloud/cdn/v1/raw_logs_service.ts @@ -0,0 +1,1039 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { + RawLogsSettings, + RawLogsStatus, + rawLogsStatusFromJSON, + rawLogsStatusToJSON, +} from "../../../../yandex/cloud/cdn/v1/raw_logs"; +import { Operation } from "../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.cdn.v1"; + +export interface ActivateRawLogsRequest { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsRequest"; + /** ID of CDN resource to switch logs storage for.. */ + resourceId: string; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface ActivateRawLogsMetadata { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsMetadata"; + /** ID of resource with activated raw logs. */ + resourceId: string; +} + +export interface ActivateRawLogsResponse { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsResponse"; + /** Raw logs status. */ + status: RawLogsStatus; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface DeactivateRawLogsRequest { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsRequest"; + /** ID of CDN resource to deactivate Raw Logs for. 
*/ + resourceId: string; +} + +export interface DeactivateRawLogsMetadata { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsMetadata"; + /** ID of CDN resource. */ + resourceId: string; +} + +export interface GetRawLogsRequest { + $type: "yandex.cloud.cdn.v1.GetRawLogsRequest"; + /** ID of CDN resource to request status and settings. */ + resourceId: string; +} + +export interface GetRawLogsResponse { + $type: "yandex.cloud.cdn.v1.GetRawLogsResponse"; + /** Raw logs status. */ + status: RawLogsStatus; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface UpdateRawLogsRequest { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsRequest"; + /** ID of CDN resource. */ + resourceId: string; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface UpdateRawLogsResponse { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsResponse"; + /** Raw logs status. */ + status: RawLogsStatus; + /** Raw logs settings. */ + settings?: RawLogsSettings; +} + +export interface UpdateRawLogsMetadata { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsMetadata"; + /** ID of CDN resource. */ + resourceId: string; +} + +const baseActivateRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsRequest", + resourceId: "", +}; + +export const ActivateRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsRequest" as const, + + encode( + message: ActivateRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseActivateRawLogsRequest } as ActivateRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateRawLogsRequest { + const message = { ...baseActivateRawLogsRequest } as ActivateRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: ActivateRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateRawLogsRequest { + const message = { ...baseActivateRawLogsRequest } as ActivateRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ActivateRawLogsRequest.$type, ActivateRawLogsRequest); + +const baseActivateRawLogsMetadata: object = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsMetadata", + resourceId: "", +}; + +export const ActivateRawLogsMetadata = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsMetadata" as const, + + encode( + message: ActivateRawLogsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateRawLogsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseActivateRawLogsMetadata, + } as ActivateRawLogsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateRawLogsMetadata { + const message = { + ...baseActivateRawLogsMetadata, + } as ActivateRawLogsMetadata; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: ActivateRawLogsMetadata): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateRawLogsMetadata { + const message = { + ...baseActivateRawLogsMetadata, + } as ActivateRawLogsMetadata; + message.resourceId = object.resourceId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ActivateRawLogsMetadata.$type, ActivateRawLogsMetadata); + +const baseActivateRawLogsResponse: object = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsResponse", + status: 0, +}; + +export const ActivateRawLogsResponse = { + $type: "yandex.cloud.cdn.v1.ActivateRawLogsResponse" as const, + + encode( + message: ActivateRawLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(8).int32(message.status); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ActivateRawLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseActivateRawLogsResponse, + } as ActivateRawLogsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32() as any; + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ActivateRawLogsResponse { + const message = { + ...baseActivateRawLogsResponse, + } as ActivateRawLogsResponse; + message.status = + object.status !== undefined && object.status !== null + ? rawLogsStatusFromJSON(object.status) + : 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: ActivateRawLogsResponse): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = rawLogsStatusToJSON(message.status)); + message.settings !== undefined && + (obj.settings = message.settings + ? 
RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ActivateRawLogsResponse { + const message = { + ...baseActivateRawLogsResponse, + } as ActivateRawLogsResponse; + message.status = object.status ?? 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ActivateRawLogsResponse.$type, ActivateRawLogsResponse); + +const baseDeactivateRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsRequest", + resourceId: "", +}; + +export const DeactivateRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsRequest" as const, + + encode( + message: DeactivateRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeactivateRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeactivateRawLogsRequest, + } as DeactivateRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeactivateRawLogsRequest { + const message = { + ...baseDeactivateRawLogsRequest, + } as DeactivateRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: DeactivateRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeactivateRawLogsRequest { + const message = { + ...baseDeactivateRawLogsRequest, + } as DeactivateRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeactivateRawLogsRequest.$type, + DeactivateRawLogsRequest +); + +const baseDeactivateRawLogsMetadata: object = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsMetadata", + resourceId: "", +}; + +export const DeactivateRawLogsMetadata = { + $type: "yandex.cloud.cdn.v1.DeactivateRawLogsMetadata" as const, + + encode( + message: DeactivateRawLogsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeactivateRawLogsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseDeactivateRawLogsMetadata, + } as DeactivateRawLogsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeactivateRawLogsMetadata { + const message = { + ...baseDeactivateRawLogsMetadata, + } as DeactivateRawLogsMetadata; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: DeactivateRawLogsMetadata): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeactivateRawLogsMetadata { + const message = { + ...baseDeactivateRawLogsMetadata, + } as DeactivateRawLogsMetadata; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + DeactivateRawLogsMetadata.$type, + DeactivateRawLogsMetadata +); + +const baseGetRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.GetRawLogsRequest", + resourceId: "", +}; + +export const GetRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.GetRawLogsRequest" as const, + + encode( + message: GetRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetRawLogsRequest } as GetRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetRawLogsRequest { + const message = { ...baseGetRawLogsRequest } as GetRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? 
String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: GetRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetRawLogsRequest { + const message = { ...baseGetRawLogsRequest } as GetRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetRawLogsRequest.$type, GetRawLogsRequest); + +const baseGetRawLogsResponse: object = { + $type: "yandex.cloud.cdn.v1.GetRawLogsResponse", + status: 0, +}; + +export const GetRawLogsResponse = { + $type: "yandex.cloud.cdn.v1.GetRawLogsResponse" as const, + + encode( + message: GetRawLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(8).int32(message.status); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetRawLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetRawLogsResponse } as GetRawLogsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32() as any; + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetRawLogsResponse { + const message = { ...baseGetRawLogsResponse } as GetRawLogsResponse; + message.status = + object.status !== undefined && object.status !== null + ? rawLogsStatusFromJSON(object.status) + : 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: GetRawLogsResponse): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = rawLogsStatusToJSON(message.status)); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetRawLogsResponse { + const message = { ...baseGetRawLogsResponse } as GetRawLogsResponse; + message.status = object.status ?? 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GetRawLogsResponse.$type, GetRawLogsResponse); + +const baseUpdateRawLogsRequest: object = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsRequest", + resourceId: "", +}; + +export const UpdateRawLogsRequest = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsRequest" as const, + + encode( + message: UpdateRawLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateRawLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateRawLogsRequest } as UpdateRawLogsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateRawLogsRequest { + const message = { ...baseUpdateRawLogsRequest } as UpdateRawLogsRequest; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: UpdateRawLogsRequest): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateRawLogsRequest { + const message = { ...baseUpdateRawLogsRequest } as UpdateRawLogsRequest; + message.resourceId = object.resourceId ?? ""; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateRawLogsRequest.$type, UpdateRawLogsRequest); + +const baseUpdateRawLogsResponse: object = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsResponse", + status: 0, +}; + +export const UpdateRawLogsResponse = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsResponse" as const, + + encode( + message: UpdateRawLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.status !== 0) { + writer.uint32(8).int32(message.status); + } + if (message.settings !== undefined) { + RawLogsSettings.encode( + message.settings, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateRawLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateRawLogsResponse } as UpdateRawLogsResponse; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.status = reader.int32() as any; + break; + case 2: + message.settings = RawLogsSettings.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateRawLogsResponse { + const message = { ...baseUpdateRawLogsResponse } as UpdateRawLogsResponse; + message.status = + object.status !== undefined && object.status !== null + ? rawLogsStatusFromJSON(object.status) + : 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? 
RawLogsSettings.fromJSON(object.settings) + : undefined; + return message; + }, + + toJSON(message: UpdateRawLogsResponse): unknown { + const obj: any = {}; + message.status !== undefined && + (obj.status = rawLogsStatusToJSON(message.status)); + message.settings !== undefined && + (obj.settings = message.settings + ? RawLogsSettings.toJSON(message.settings) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateRawLogsResponse { + const message = { ...baseUpdateRawLogsResponse } as UpdateRawLogsResponse; + message.status = object.status ?? 0; + message.settings = + object.settings !== undefined && object.settings !== null + ? RawLogsSettings.fromPartial(object.settings) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateRawLogsResponse.$type, UpdateRawLogsResponse); + +const baseUpdateRawLogsMetadata: object = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsMetadata", + resourceId: "", +}; + +export const UpdateRawLogsMetadata = { + $type: "yandex.cloud.cdn.v1.UpdateRawLogsMetadata" as const, + + encode( + message: UpdateRawLogsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourceId !== "") { + writer.uint32(10).string(message.resourceId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateRawLogsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateRawLogsMetadata } as UpdateRawLogsMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourceId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateRawLogsMetadata { + const message = { ...baseUpdateRawLogsMetadata } as UpdateRawLogsMetadata; + message.resourceId = + object.resourceId !== undefined && object.resourceId !== null + ? String(object.resourceId) + : ""; + return message; + }, + + toJSON(message: UpdateRawLogsMetadata): unknown { + const obj: any = {}; + message.resourceId !== undefined && (obj.resourceId = message.resourceId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateRawLogsMetadata { + const message = { ...baseUpdateRawLogsMetadata } as UpdateRawLogsMetadata; + message.resourceId = object.resourceId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateRawLogsMetadata.$type, UpdateRawLogsMetadata); + +export const RawLogsServiceService = { + activate: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Activate", + requestStream: false, + responseStream: false, + requestSerialize: (value: ActivateRawLogsRequest) => + Buffer.from(ActivateRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ActivateRawLogsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + deactivate: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Deactivate", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeactivateRawLogsRequest) => + Buffer.from(DeactivateRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + DeactivateRawLogsRequest.decode(value), + responseSerialize: (value: Operation) => + 
Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + get: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetRawLogsRequest) => + Buffer.from(GetRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetRawLogsRequest.decode(value), + responseSerialize: (value: GetRawLogsResponse) => + Buffer.from(GetRawLogsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => GetRawLogsResponse.decode(value), + }, + update: { + path: "/yandex.cloud.cdn.v1.RawLogsService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateRawLogsRequest) => + Buffer.from(UpdateRawLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateRawLogsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface RawLogsServiceServer extends UntypedServiceImplementation { + activate: handleUnaryCall; + deactivate: handleUnaryCall; + get: handleUnaryCall; + update: handleUnaryCall; +} + +export interface RawLogsServiceClient extends Client { + activate( + request: ActivateRawLogsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + activate( + request: ActivateRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + activate( + request: ActivateRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deactivate( + request: DeactivateRawLogsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deactivate( + request: 
DeactivateRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + deactivate( + request: DeactivateRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + get( + request: GetRawLogsRequest, + callback: (error: ServiceError | null, response: GetRawLogsResponse) => void + ): ClientUnaryCall; + get( + request: GetRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: GetRawLogsResponse) => void + ): ClientUnaryCall; + get( + request: GetRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: GetRawLogsResponse) => void + ): ClientUnaryCall; + update( + request: UpdateRawLogsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateRawLogsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateRawLogsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const RawLogsServiceClient = makeGenericClientConstructor( + RawLogsServiceService, + "yandex.cloud.cdn.v1.RawLogsService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): RawLogsServiceClient; + service: typeof RawLogsServiceService; +}; + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/cdn/v1/resource.ts b/src/generated/yandex/cloud/cdn/v1/resource.ts index 18f38f22..f826fced 100644 --- a/src/generated/yandex/cloud/cdn/v1/resource.ts +++ b/src/generated/yandex/cloud/cdn/v1/resource.ts @@ -122,7 +122,7 @@ export function rewriteFlagToJSON(object: RewriteFlag): string { } } -/** A certificate type patameters. */ +/** A certificate type parameters. */ export enum SSLCertificateType { /** SSL_CERTIFICATE_TYPE_UNSPECIFIED - SSL certificate is unspecified. */ SSL_CERTIFICATE_TYPE_UNSPECIFIED = 0, @@ -171,7 +171,7 @@ export function sSLCertificateTypeToJSON(object: SSLCertificateType): string { } } -/** A certificate status patameters. */ +/** A certificate status parameters. */ export enum SSLCertificateStatus { /** SSL_CERTIFICATE_STATUS_UNSPECIFIED - SSL certificate is unspecified. */ SSL_CERTIFICATE_STATUS_UNSPECIFIED = 0, @@ -557,19 +557,19 @@ export interface ResourceOptions_RewriteOption { flag: RewriteFlag; } -/** A set of the personal SSL certificate patameters. */ +/** A set of the personal SSL certificate parameters. */ export interface SSLTargetCertificate { $type: "yandex.cloud.cdn.v1.SSLTargetCertificate"; - /** Type of the sertificate. */ + /** Type of the certificate. */ type: SSLCertificateType; /** Certificate data. */ data?: SSLCertificateData; } -/** A SSL sertificate patameters. */ +/** A SSL certificate parameters. */ export interface SSLCertificate { $type: "yandex.cloud.cdn.v1.SSLCertificate"; - /** Type of the sertificate. */ + /** Type of the certificate. */ type: SSLCertificateType; /** Active status. */ status: SSLCertificateStatus; @@ -577,7 +577,7 @@ export interface SSLCertificate { data?: SSLCertificateData; } -/** A certificate data patameters. */ +/** A certificate data parameters. 
*/ export interface SSLCertificateData { $type: "yandex.cloud.cdn.v1.SSLCertificateData"; /** @@ -587,10 +587,10 @@ export interface SSLCertificateData { cm?: SSLCertificateCMData | undefined; } -/** A certificate data custom patameters. */ +/** A certificate data custom parameters. */ export interface SSLCertificateCMData { $type: "yandex.cloud.cdn.v1.SSLCertificateCMData"; - /** ID of the custom sertificate. */ + /** ID of the custom certificate. */ id: string; } diff --git a/src/generated/yandex/cloud/cdn/v1/resource_service.ts b/src/generated/yandex/cloud/cdn/v1/resource_service.ts index e5a50a87..b3d961af 100644 --- a/src/generated/yandex/cloud/cdn/v1/resource_service.ts +++ b/src/generated/yandex/cloud/cdn/v1/resource_service.ts @@ -102,14 +102,14 @@ export interface CreateResourceRequest_Origin { * returned in result. */ originSource: string | undefined; - /** Set up resourse origin parameters. */ + /** Set up resource origin parameters. */ originSourceParams?: ResourceOriginParams | undefined; } -/** A set of resourse origin parameters. */ +/** A set of resource origin parameters. */ export interface ResourceOriginParams { $type: "yandex.cloud.cdn.v1.ResourceOriginParams"; - /** Sourse of the content. */ + /** Source of the content. */ source: string; /** Set up type of the origin. */ meta?: OriginMeta; @@ -1438,7 +1438,11 @@ export const ResourceServiceService = { Buffer.from(ListResourcesResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListResourcesResponse.decode(value), }, - /** Creates client's CDN resource. */ + /** + * Creates a CDN resource in the specified folder. + * + * Creation may take up to 15 minutes. + */ create: { path: "/yandex.cloud.cdn.v1.ResourceService/Create", requestStream: false, @@ -1450,7 +1454,14 @@ export const ResourceServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates of client's CDN resource. 
(PATCH behavior) */ + /** + * Updates the specified CDN resource. + * + * The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a + * [CacheService.Purge] request. + */ update: { path: "/yandex.cloud.cdn.v1.ResourceService/Update", requestStream: false, @@ -1475,7 +1486,7 @@ export const ResourceServiceService = { responseDeserialize: (value: Buffer) => Operation.decode(value), }, /** - * Get Provider's CNAME (edge endpoint) binded to specified folder id. + * Get Provider's CNAME (edge endpoint) bind to specified folder id. * Returns UNIMPLEMENTED error, if provider doesn't support CNAME request. */ getProviderCName: { @@ -1498,14 +1509,25 @@ export interface ResourceServiceServer extends UntypedServiceImplementation { get: handleUnaryCall; /** Lists CDN resources. */ list: handleUnaryCall; - /** Creates client's CDN resource. */ + /** + * Creates a CDN resource in the specified folder. + * + * Creation may take up to 15 minutes. + */ create: handleUnaryCall; - /** Updates of client's CDN resource. (PATCH behavior) */ + /** + * Updates the specified CDN resource. + * + * The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a + * [CacheService.Purge] request. + */ update: handleUnaryCall; /** Deletes client's CDN resource. */ delete: handleUnaryCall; /** - * Get Provider's CNAME (edge endpoint) binded to specified folder id. + * Get Provider's CNAME (edge endpoint) bind to specified folder id. * Returns UNIMPLEMENTED error, if provider doesn't support CNAME request. 
*/ getProviderCName: handleUnaryCall< @@ -1556,7 +1578,11 @@ export interface ResourceServiceClient extends Client { response: ListResourcesResponse ) => void ): ClientUnaryCall; - /** Creates client's CDN resource. */ + /** + * Creates a CDN resource in the specified folder. + * + * Creation may take up to 15 minutes. + */ create( request: CreateResourceRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1572,7 +1598,14 @@ export interface ResourceServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates of client's CDN resource. (PATCH behavior) */ + /** + * Updates the specified CDN resource. + * + * The method implements patch behaviour, i.e. only the fields specified in the request are updated in the resource. + * + * Changes may take up to 15 minutes to apply. Afterwards, it is recommended to purge the resource's cache via a + * [CacheService.Purge] request. + */ update( request: UpdateResourceRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1605,7 +1638,7 @@ export interface ResourceServiceClient extends Client { callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; /** - * Get Provider's CNAME (edge endpoint) binded to specified folder id. + * Get Provider's CNAME (edge endpoint) bind to specified folder id. * Returns UNIMPLEMENTED error, if provider doesn't support CNAME request. */ getProviderCName( diff --git a/src/generated/yandex/cloud/compute/v1/disk.ts b/src/generated/yandex/cloud/compute/v1/disk.ts index 9e58fa87..0905a82e 100644 --- a/src/generated/yandex/cloud/compute/v1/disk.ts +++ b/src/generated/yandex/cloud/compute/v1/disk.ts @@ -32,7 +32,7 @@ export interface Disk { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. 
* - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/compute/v1/disk_service.ts b/src/generated/yandex/cloud/compute/v1/disk_service.ts index e9a6acf8..7f813b4f 100644 --- a/src/generated/yandex/cloud/compute/v1/disk_service.ts +++ b/src/generated/yandex/cloud/compute/v1/disk_service.ts @@ -209,6 +209,32 @@ export interface ListDiskOperationsResponse { nextPageToken: string; } +export interface MoveDiskRequest { + $type: "yandex.cloud.compute.v1.MoveDiskRequest"; + /** + * ID of the disk to move. + * + * To get the disk ID, make a [DiskService.List] request. + */ + diskId: string; + /** + * ID of the folder to move the disk to. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + destinationFolderId: string; +} + +export interface MoveDiskMetadata { + $type: "yandex.cloud.compute.v1.MoveDiskMetadata"; + /** ID of the disk that is being moved. */ + diskId: string; + /** ID of the folder that the disk is being moved from. */ + sourceFolderId: string; + /** ID of the folder that the disk is being moved to. 
*/ + destinationFolderId: string; +} + const baseGetDiskRequest: object = { $type: "yandex.cloud.compute.v1.GetDiskRequest", diskId: "", @@ -1481,6 +1507,174 @@ messageTypeRegistry.set( ListDiskOperationsResponse ); +const baseMoveDiskRequest: object = { + $type: "yandex.cloud.compute.v1.MoveDiskRequest", + diskId: "", + destinationFolderId: "", +}; + +export const MoveDiskRequest = { + $type: "yandex.cloud.compute.v1.MoveDiskRequest" as const, + + encode( + message: MoveDiskRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveDiskRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveDiskRequest } as MoveDiskRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveDiskRequest { + const message = { ...baseMoveDiskRequest } as MoveDiskRequest; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? 
String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveDiskRequest): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveDiskRequest { + const message = { ...baseMoveDiskRequest } as MoveDiskRequest; + message.diskId = object.diskId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveDiskRequest.$type, MoveDiskRequest); + +const baseMoveDiskMetadata: object = { + $type: "yandex.cloud.compute.v1.MoveDiskMetadata", + diskId: "", + sourceFolderId: "", + destinationFolderId: "", +}; + +export const MoveDiskMetadata = { + $type: "yandex.cloud.compute.v1.MoveDiskMetadata" as const, + + encode( + message: MoveDiskMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.diskId !== "") { + writer.uint32(10).string(message.diskId); + } + if (message.sourceFolderId !== "") { + writer.uint32(18).string(message.sourceFolderId); + } + if (message.destinationFolderId !== "") { + writer.uint32(26).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveDiskMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMoveDiskMetadata } as MoveDiskMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.diskId = reader.string(); + break; + case 2: + message.sourceFolderId = reader.string(); + break; + case 3: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveDiskMetadata { + const message = { ...baseMoveDiskMetadata } as MoveDiskMetadata; + message.diskId = + object.diskId !== undefined && object.diskId !== null + ? String(object.diskId) + : ""; + message.sourceFolderId = + object.sourceFolderId !== undefined && object.sourceFolderId !== null + ? String(object.sourceFolderId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveDiskMetadata): unknown { + const obj: any = {}; + message.diskId !== undefined && (obj.diskId = message.diskId); + message.sourceFolderId !== undefined && + (obj.sourceFolderId = message.sourceFolderId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveDiskMetadata { + const message = { ...baseMoveDiskMetadata } as MoveDiskMetadata; + message.diskId = object.diskId ?? ""; + message.sourceFolderId = object.sourceFolderId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveDiskMetadata.$type, MoveDiskMetadata); + /** A set of methods for managing Disk resources. 
*/ export const DiskServiceService = { /** @@ -1573,6 +1767,18 @@ export const DiskServiceService = { responseDeserialize: (value: Buffer) => ListDiskOperationsResponse.decode(value), }, + /** Moves the specified disk to another folder of the same cloud. */ + move: { + path: "/yandex.cloud.compute.v1.DiskService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveDiskRequest) => + Buffer.from(MoveDiskRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveDiskRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface DiskServiceServer extends UntypedServiceImplementation { @@ -1607,6 +1813,8 @@ export interface DiskServiceServer extends UntypedServiceImplementation { ListDiskOperationsRequest, ListDiskOperationsResponse >; + /** Moves the specified disk to another folder of the same cloud. */ + move: handleUnaryCall; } export interface DiskServiceClient extends Client { @@ -1731,6 +1939,22 @@ export interface DiskServiceClient extends Client { response: ListDiskOperationsResponse ) => void ): ClientUnaryCall; + /** Moves the specified disk to another folder of the same cloud. 
*/ + move( + request: MoveDiskRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveDiskRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveDiskRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const DiskServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/compute/v1/filesystem_service.ts b/src/generated/yandex/cloud/compute/v1/filesystem_service.ts index 361acea6..e074f64c 100644 --- a/src/generated/yandex/cloud/compute/v1/filesystem_service.ts +++ b/src/generated/yandex/cloud/compute/v1/filesystem_service.ts @@ -163,6 +163,8 @@ export interface UpdateFilesystemRequest { * 3. Send the new set in this field. */ labels: { [key: string]: string }; + /** Size of the filesystem, specified in bytes. */ + size: number; } export interface UpdateFilesystemRequest_LabelsEntry { @@ -846,6 +848,7 @@ const baseUpdateFilesystemRequest: object = { filesystemId: "", name: "", description: "", + size: 0, }; export const UpdateFilesystemRequest = { @@ -877,6 +880,9 @@ export const UpdateFilesystemRequest = { writer.uint32(42).fork() ).ldelim(); }); + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } return writer; }, @@ -914,6 +920,9 @@ export const UpdateFilesystemRequest = { message.labels[entry5.key] = entry5.value; } break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -948,6 +957,10 @@ export const UpdateFilesystemRequest = { acc[key] = String(value); return acc; }, {}); + message.size = + object.size !== undefined && object.size !== null + ? 
Number(object.size) + : 0; return message; }, @@ -968,6 +981,7 @@ export const UpdateFilesystemRequest = { obj.labels[k] = v; }); } + message.size !== undefined && (obj.size = Math.round(message.size)); return obj; }, @@ -992,6 +1006,7 @@ export const UpdateFilesystemRequest = { } return acc; }, {}); + message.size = object.size ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/image.ts b/src/generated/yandex/cloud/compute/v1/image.ts index 8cc3e817..a5ad7257 100644 --- a/src/generated/yandex/cloud/compute/v1/image.ts +++ b/src/generated/yandex/cloud/compute/v1/image.ts @@ -36,7 +36,7 @@ export interface Image { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/compute/v1/image_service.ts b/src/generated/yandex/cloud/compute/v1/image_service.ts index e66f685b..9e4c1ec1 100644 --- a/src/generated/yandex/cloud/compute/v1/image_service.ts +++ b/src/generated/yandex/cloud/compute/v1/image_service.ts @@ -112,7 +112,7 @@ export interface CreateImageRequest { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. 
IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/compute/v1/instance.ts b/src/generated/yandex/cloud/compute/v1/instance.ts index ec2f772e..65988ab7 100644 --- a/src/generated/yandex/cloud/compute/v1/instance.ts +++ b/src/generated/yandex/cloud/compute/v1/instance.ts @@ -79,6 +79,8 @@ export interface Instance { bootDisk?: AttachedDisk; /** Array of secondary disks that are attached to the instance. */ secondaryDisks: AttachedDisk[]; + /** Array of local disks that are attached to the instance. */ + localDisks: AttachedLocalDisk[]; /** Array of filesystems that are attached to the instance. */ filesystems: AttachedFilesystem[]; /** Array of network interfaces that are attached to the instance. */ @@ -282,6 +284,19 @@ export function attachedDisk_ModeToJSON(object: AttachedDisk_Mode): string { } } +export interface AttachedLocalDisk { + $type: "yandex.cloud.compute.v1.AttachedLocalDisk"; + /** Size of the disk, specified in bytes. */ + size: number; + /** + * Serial number that is reflected into the /dev/disk/by-id/ tree + * of a Linux operating system running within the instance. + * + * This value can be used to reference the device for mounting, resizing, and so on, from within the instance. + */ + deviceName: string; +} + export interface AttachedFilesystem { $type: "yandex.cloud.compute.v1.AttachedFilesystem"; /** Access mode to the filesystem. 
*/ @@ -595,6 +610,9 @@ export const Instance = { for (const v of message.secondaryDisks) { AttachedDisk.encode(v!, writer.uint32(106).fork()).ldelim(); } + for (const v of message.localDisks) { + AttachedLocalDisk.encode(v!, writer.uint32(178).fork()).ldelim(); + } for (const v of message.filesystems) { AttachedFilesystem.encode(v!, writer.uint32(170).fork()).ldelim(); } @@ -635,6 +653,7 @@ export const Instance = { message.labels = {}; message.metadata = {}; message.secondaryDisks = []; + message.localDisks = []; message.filesystems = []; message.networkInterfaces = []; while (reader.pos < end) { @@ -692,6 +711,11 @@ export const Instance = { AttachedDisk.decode(reader, reader.uint32()) ); break; + case 22: + message.localDisks.push( + AttachedLocalDisk.decode(reader, reader.uint32()) + ); + break; case 21: message.filesystems.push( AttachedFilesystem.decode(reader, reader.uint32()) @@ -789,6 +813,9 @@ export const Instance = { message.secondaryDisks = (object.secondaryDisks ?? []).map((e: any) => AttachedDisk.fromJSON(e) ); + message.localDisks = (object.localDisks ?? []).map((e: any) => + AttachedLocalDisk.fromJSON(e) + ); message.filesystems = (object.filesystems ?? []).map((e: any) => AttachedFilesystem.fromJSON(e) ); @@ -858,6 +885,13 @@ export const Instance = { } else { obj.secondaryDisks = []; } + if (message.localDisks) { + obj.localDisks = message.localDisks.map((e) => + e ? AttachedLocalDisk.toJSON(e) : undefined + ); + } else { + obj.localDisks = []; + } if (message.filesystems) { obj.filesystems = message.filesystems.map((e) => e ? 
AttachedFilesystem.toJSON(e) : undefined @@ -926,6 +960,8 @@ export const Instance = { : undefined; message.secondaryDisks = object.secondaryDisks?.map((e) => AttachedDisk.fromPartial(e)) || []; + message.localDisks = + object.localDisks?.map((e) => AttachedLocalDisk.fromPartial(e)) || []; message.filesystems = object.filesystems?.map((e) => AttachedFilesystem.fromPartial(e)) || []; message.networkInterfaces = @@ -1307,6 +1343,81 @@ export const AttachedDisk = { messageTypeRegistry.set(AttachedDisk.$type, AttachedDisk); +const baseAttachedLocalDisk: object = { + $type: "yandex.cloud.compute.v1.AttachedLocalDisk", + size: 0, + deviceName: "", +}; + +export const AttachedLocalDisk = { + $type: "yandex.cloud.compute.v1.AttachedLocalDisk" as const, + + encode( + message: AttachedLocalDisk, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.size !== 0) { + writer.uint32(8).int64(message.size); + } + if (message.deviceName !== "") { + writer.uint32(18).string(message.deviceName); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AttachedLocalDisk { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAttachedLocalDisk } as AttachedLocalDisk; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = longToNumber(reader.int64() as Long); + break; + case 2: + message.deviceName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AttachedLocalDisk { + const message = { ...baseAttachedLocalDisk } as AttachedLocalDisk; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + message.deviceName = + object.deviceName !== undefined && object.deviceName !== null + ? 
String(object.deviceName) + : ""; + return message; + }, + + toJSON(message: AttachedLocalDisk): unknown { + const obj: any = {}; + message.size !== undefined && (obj.size = Math.round(message.size)); + message.deviceName !== undefined && (obj.deviceName = message.deviceName); + return obj; + }, + + fromPartial, I>>( + object: I + ): AttachedLocalDisk { + const message = { ...baseAttachedLocalDisk } as AttachedLocalDisk; + message.size = object.size ?? 0; + message.deviceName = object.deviceName ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AttachedLocalDisk.$type, AttachedLocalDisk); + const baseAttachedFilesystem: object = { $type: "yandex.cloud.compute.v1.AttachedFilesystem", mode: 0, diff --git a/src/generated/yandex/cloud/compute/v1/instance_service.ts b/src/generated/yandex/cloud/compute/v1/instance_service.ts index 7322f303..46c7daa0 100644 --- a/src/generated/yandex/cloud/compute/v1/instance_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instance_service.ts @@ -166,6 +166,8 @@ export interface CreateInstanceRequest { bootDiskSpec?: AttachedDiskSpec; /** Array of secondary disks to attach to the instance. */ secondaryDiskSpecs: AttachedDiskSpec[]; + /** Array of local disks to attach to the instance. */ + localDiskSpecs: AttachedLocalDiskSpec[]; /** * Array of filesystems to attach to the instance. * @@ -275,6 +277,8 @@ export interface UpdateInstanceRequest { networkSettings?: NetworkSettings; /** Placement policy configuration. */ placementPolicy?: PlacementPolicy; + /** Scheduling policy configuration. */ + schedulingPolicy?: SchedulingPolicy; } export interface UpdateInstanceRequest_LabelsEntry { @@ -678,6 +682,12 @@ export interface AttachedDiskSpec_DiskSpec { snapshotId: string | undefined; } +export interface AttachedLocalDiskSpec { + $type: "yandex.cloud.compute.v1.AttachedLocalDiskSpec"; + /** Size of the disk, specified in bytes. 
*/ + size: number; +} + export interface AttachedFilesystemSpec { $type: "yandex.cloud.compute.v1.AttachedFilesystemSpec"; /** Mode of access to the filesystem that should be attached. */ @@ -789,6 +799,32 @@ export interface DnsRecordSpec { ptr: boolean; } +export interface MoveInstanceRequest { + $type: "yandex.cloud.compute.v1.MoveInstanceRequest"; + /** + * ID of the instance to move. + * + * To get the instance ID, make a [InstanceService.List] request. + */ + instanceId: string; + /** + * ID of the folder to move the instance to. + * + * To get the folder ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ + destinationFolderId: string; +} + +export interface MoveInstanceMetadata { + $type: "yandex.cloud.compute.v1.MoveInstanceMetadata"; + /** ID of the instance that is being moved. */ + instanceId: string; + /** ID of the folder that the instance is being moved from. */ + sourceFolderId: string; + /** ID of the folder that the instance is being moved to. 
*/ + destinationFolderId: string; +} + const baseGetInstanceRequest: object = { $type: "yandex.cloud.compute.v1.GetInstanceRequest", instanceId: "", @@ -1122,6 +1158,9 @@ export const CreateInstanceRequest = { for (const v of message.secondaryDiskSpecs) { AttachedDiskSpec.encode(v!, writer.uint32(82).fork()).ldelim(); } + for (const v of message.localDiskSpecs) { + AttachedLocalDiskSpec.encode(v!, writer.uint32(146).fork()).ldelim(); + } for (const v of message.filesystemSpecs) { AttachedFilesystemSpec.encode(v!, writer.uint32(138).fork()).ldelim(); } @@ -1165,6 +1204,7 @@ export const CreateInstanceRequest = { message.labels = {}; message.metadata = {}; message.secondaryDiskSpecs = []; + message.localDiskSpecs = []; message.filesystemSpecs = []; message.networkInterfaceSpecs = []; while (reader.pos < end) { @@ -1217,6 +1257,11 @@ export const CreateInstanceRequest = { AttachedDiskSpec.decode(reader, reader.uint32()) ); break; + case 18: + message.localDiskSpecs.push( + AttachedLocalDiskSpec.decode(reader, reader.uint32()) + ); + break; case 17: message.filesystemSpecs.push( AttachedFilesystemSpec.decode(reader, reader.uint32()) @@ -1304,6 +1349,9 @@ export const CreateInstanceRequest = { message.secondaryDiskSpecs = (object.secondaryDiskSpecs ?? []).map( (e: any) => AttachedDiskSpec.fromJSON(e) ); + message.localDiskSpecs = (object.localDiskSpecs ?? []).map((e: any) => + AttachedLocalDiskSpec.fromJSON(e) + ); message.filesystemSpecs = (object.filesystemSpecs ?? []).map((e: any) => AttachedFilesystemSpec.fromJSON(e) ); @@ -1368,6 +1416,13 @@ export const CreateInstanceRequest = { } else { obj.secondaryDiskSpecs = []; } + if (message.localDiskSpecs) { + obj.localDiskSpecs = message.localDiskSpecs.map((e) => + e ? AttachedLocalDiskSpec.toJSON(e) : undefined + ); + } else { + obj.localDiskSpecs = []; + } if (message.filesystemSpecs) { obj.filesystemSpecs = message.filesystemSpecs.map((e) => e ? 
AttachedFilesystemSpec.toJSON(e) : undefined @@ -1436,6 +1491,9 @@ export const CreateInstanceRequest = { message.secondaryDiskSpecs = object.secondaryDiskSpecs?.map((e) => AttachedDiskSpec.fromPartial(e)) || []; + message.localDiskSpecs = + object.localDiskSpecs?.map((e) => AttachedLocalDiskSpec.fromPartial(e)) || + []; message.filesystemSpecs = object.filesystemSpecs?.map((e) => AttachedFilesystemSpec.fromPartial(e) @@ -1771,6 +1829,12 @@ export const UpdateInstanceRequest = { writer.uint32(90).fork() ).ldelim(); } + if (message.schedulingPolicy !== undefined) { + SchedulingPolicy.encode( + message.schedulingPolicy, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -1837,6 +1901,12 @@ export const UpdateInstanceRequest = { reader.uint32() ); break; + case 12: + message.schedulingPolicy = SchedulingPolicy.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1895,6 +1965,10 @@ export const UpdateInstanceRequest = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromJSON(object.placementPolicy) : undefined; + message.schedulingPolicy = + object.schedulingPolicy !== undefined && object.schedulingPolicy !== null + ? SchedulingPolicy.fromJSON(object.schedulingPolicy) + : undefined; return message; }, @@ -1935,6 +2009,10 @@ export const UpdateInstanceRequest = { (obj.placementPolicy = message.placementPolicy ? PlacementPolicy.toJSON(message.placementPolicy) : undefined); + message.schedulingPolicy !== undefined && + (obj.schedulingPolicy = message.schedulingPolicy + ? SchedulingPolicy.toJSON(message.schedulingPolicy) + : undefined); return obj; }, @@ -1979,6 +2057,10 @@ export const UpdateInstanceRequest = { object.placementPolicy !== undefined && object.placementPolicy !== null ? PlacementPolicy.fromPartial(object.placementPolicy) : undefined; + message.schedulingPolicy = + object.schedulingPolicy !== undefined && object.schedulingPolicy !== null + ? 
SchedulingPolicy.fromPartial(object.schedulingPolicy) + : undefined; return message; }, }; @@ -5197,6 +5279,71 @@ messageTypeRegistry.set( AttachedDiskSpec_DiskSpec ); +const baseAttachedLocalDiskSpec: object = { + $type: "yandex.cloud.compute.v1.AttachedLocalDiskSpec", + size: 0, +}; + +export const AttachedLocalDiskSpec = { + $type: "yandex.cloud.compute.v1.AttachedLocalDiskSpec" as const, + + encode( + message: AttachedLocalDiskSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.size !== 0) { + writer.uint32(8).int64(message.size); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): AttachedLocalDiskSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAttachedLocalDiskSpec } as AttachedLocalDiskSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): AttachedLocalDiskSpec { + const message = { ...baseAttachedLocalDiskSpec } as AttachedLocalDiskSpec; + message.size = + object.size !== undefined && object.size !== null + ? Number(object.size) + : 0; + return message; + }, + + toJSON(message: AttachedLocalDiskSpec): unknown { + const obj: any = {}; + message.size !== undefined && (obj.size = Math.round(message.size)); + return obj; + }, + + fromPartial, I>>( + object: I + ): AttachedLocalDiskSpec { + const message = { ...baseAttachedLocalDiskSpec } as AttachedLocalDiskSpec; + message.size = object.size ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(AttachedLocalDiskSpec.$type, AttachedLocalDiskSpec); + const baseAttachedFilesystemSpec: object = { $type: "yandex.cloud.compute.v1.AttachedFilesystemSpec", mode: 0, @@ -5727,6 +5874,177 @@ export const DnsRecordSpec = { messageTypeRegistry.set(DnsRecordSpec.$type, DnsRecordSpec); +const baseMoveInstanceRequest: object = { + $type: "yandex.cloud.compute.v1.MoveInstanceRequest", + instanceId: "", + destinationFolderId: "", +}; + +export const MoveInstanceRequest = { + $type: "yandex.cloud.compute.v1.MoveInstanceRequest" as const, + + encode( + message: MoveInstanceRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.destinationFolderId !== "") { + writer.uint32(18).string(message.destinationFolderId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MoveInstanceRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMoveInstanceRequest } as MoveInstanceRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveInstanceRequest { + const message = { ...baseMoveInstanceRequest } as MoveInstanceRequest; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? 
String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveInstanceRequest): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveInstanceRequest { + const message = { ...baseMoveInstanceRequest } as MoveInstanceRequest; + message.instanceId = object.instanceId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveInstanceRequest.$type, MoveInstanceRequest); + +const baseMoveInstanceMetadata: object = { + $type: "yandex.cloud.compute.v1.MoveInstanceMetadata", + instanceId: "", + sourceFolderId: "", + destinationFolderId: "", +}; + +export const MoveInstanceMetadata = { + $type: "yandex.cloud.compute.v1.MoveInstanceMetadata" as const, + + encode( + message: MoveInstanceMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.instanceId !== "") { + writer.uint32(10).string(message.instanceId); + } + if (message.sourceFolderId !== "") { + writer.uint32(18).string(message.sourceFolderId); + } + if (message.destinationFolderId !== "") { + writer.uint32(26).string(message.destinationFolderId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): MoveInstanceMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMoveInstanceMetadata } as MoveInstanceMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.instanceId = reader.string(); + break; + case 2: + message.sourceFolderId = reader.string(); + break; + case 3: + message.destinationFolderId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): MoveInstanceMetadata { + const message = { ...baseMoveInstanceMetadata } as MoveInstanceMetadata; + message.instanceId = + object.instanceId !== undefined && object.instanceId !== null + ? String(object.instanceId) + : ""; + message.sourceFolderId = + object.sourceFolderId !== undefined && object.sourceFolderId !== null + ? String(object.sourceFolderId) + : ""; + message.destinationFolderId = + object.destinationFolderId !== undefined && + object.destinationFolderId !== null + ? String(object.destinationFolderId) + : ""; + return message; + }, + + toJSON(message: MoveInstanceMetadata): unknown { + const obj: any = {}; + message.instanceId !== undefined && (obj.instanceId = message.instanceId); + message.sourceFolderId !== undefined && + (obj.sourceFolderId = message.sourceFolderId); + message.destinationFolderId !== undefined && + (obj.destinationFolderId = message.destinationFolderId); + return obj; + }, + + fromPartial, I>>( + object: I + ): MoveInstanceMetadata { + const message = { ...baseMoveInstanceMetadata } as MoveInstanceMetadata; + message.instanceId = object.instanceId ?? ""; + message.sourceFolderId = object.sourceFolderId ?? ""; + message.destinationFolderId = object.destinationFolderId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MoveInstanceMetadata.$type, MoveInstanceMetadata); + /** A set of methods for managing Instance resources. 
*/ export const InstanceServiceService = { /** @@ -5985,6 +6303,25 @@ export const InstanceServiceService = { responseDeserialize: (value: Buffer) => ListInstanceOperationsResponse.decode(value), }, + /** + * Moves the specified instance to another folder of the same cloud. + * + * The instance must be stopped before moving. To stop the instance, make a [Stop] request. + * + * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * that have been recorded to the source folder prior to moving will be retained. + */ + move: { + path: "/yandex.cloud.compute.v1.InstanceService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveInstanceRequest) => + Buffer.from(MoveInstanceRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveInstanceRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface InstanceServiceServer extends UntypedServiceImplementation { @@ -6064,6 +6401,15 @@ export interface InstanceServiceServer extends UntypedServiceImplementation { ListInstanceOperationsRequest, ListInstanceOperationsResponse >; + /** + * Moves the specified instance to another folder of the same cloud. + * + * The instance must be stopped before moving. To stop the instance, make a [Stop] request. + * + * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * that have been recorded to the source folder prior to moving will be retained. + */ + move: handleUnaryCall; } export interface InstanceServiceClient extends Client { @@ -6410,6 +6756,29 @@ export interface InstanceServiceClient extends Client { response: ListInstanceOperationsResponse ) => void ): ClientUnaryCall; + /** + * Moves the specified instance to another folder of the same cloud. 
+ * + * The instance must be stopped before moving. To stop the instance, make a [Stop] request. + * + * After moving, the instance will start recording its Yandex Monitoring default metrics to its new folder. Metrics + * that have been recorded to the source folder prior to moving will be retained. + */ + move( + request: MoveInstanceRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveInstanceRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + move( + request: MoveInstanceRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const InstanceServiceClient = makeGenericClientConstructor( diff --git a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts index 9eddaf9b..78804e17 100644 --- a/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts +++ b/src/generated/yandex/cloud/compute/v1/instancegroup/instance_group_service.ts @@ -241,6 +241,11 @@ export interface UpdateInstanceGroupRequest { variables: Variable[]; /** Flag that inhibits deletion of the instance group */ deletionProtection: boolean; + /** + * Settings for balancing load between instances via [Application Load Balancer](/docs/application-load-balancer/concepts) + * (OSI model layer 7). 
+ */ + applicationLoadBalancerSpec?: ApplicationLoadBalancerSpec; } export interface UpdateInstanceGroupRequest_LabelsEntry { @@ -1623,6 +1628,12 @@ export const UpdateInstanceGroupRequest = { if (message.deletionProtection === true) { writer.uint32(128).bool(message.deletionProtection); } + if (message.applicationLoadBalancerSpec !== undefined) { + ApplicationLoadBalancerSpec.encode( + message.applicationLoadBalancerSpec, + writer.uint32(138).fork() + ).ldelim(); + } return writer; }, @@ -1700,6 +1711,10 @@ export const UpdateInstanceGroupRequest = { case 16: message.deletionProtection = reader.bool(); break; + case 17: + message.applicationLoadBalancerSpec = + ApplicationLoadBalancerSpec.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1770,6 +1785,13 @@ export const UpdateInstanceGroupRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.applicationLoadBalancerSpec = + object.applicationLoadBalancerSpec !== undefined && + object.applicationLoadBalancerSpec !== null + ? ApplicationLoadBalancerSpec.fromJSON( + object.applicationLoadBalancerSpec + ) + : undefined; return message; }, @@ -1825,6 +1847,12 @@ export const UpdateInstanceGroupRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.applicationLoadBalancerSpec !== undefined && + (obj.applicationLoadBalancerSpec = message.applicationLoadBalancerSpec + ? ApplicationLoadBalancerSpec.toJSON( + message.applicationLoadBalancerSpec + ) + : undefined); return obj; }, @@ -1877,6 +1905,13 @@ export const UpdateInstanceGroupRequest = { message.variables = object.variables?.map((e) => Variable.fromPartial(e)) || []; message.deletionProtection = object.deletionProtection ?? false; + message.applicationLoadBalancerSpec = + object.applicationLoadBalancerSpec !== undefined && + object.applicationLoadBalancerSpec !== null + ? 
ApplicationLoadBalancerSpec.fromPartial( + object.applicationLoadBalancerSpec + ) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/compute/v1/snapshot.ts b/src/generated/yandex/cloud/compute/v1/snapshot.ts index 41ec4895..4c1fe67c 100644 --- a/src/generated/yandex/cloud/compute/v1/snapshot.ts +++ b/src/generated/yandex/cloud/compute/v1/snapshot.ts @@ -28,7 +28,7 @@ export interface Snapshot { * License IDs that indicate which licenses are attached to this resource. * License IDs are used to calculate additional charges for the use of the virtual machine. * - * The correct license ID is generated by Yandex.Cloud. IDs are inherited by new resources created from this resource. + * The correct license ID is generated by Yandex Cloud. IDs are inherited by new resources created from this resource. * * If you know the license IDs, specify them when you create the image. * For example, if you create a disk image using a third-party utility and load it into Yandex Object Storage, the license IDs will be lost. diff --git a/src/generated/yandex/cloud/containerregistry/v1/registry.ts b/src/generated/yandex/cloud/containerregistry/v1/registry.ts index d36724dc..9b6a8f87 100644 --- a/src/generated/yandex/cloud/containerregistry/v1/registry.ts +++ b/src/generated/yandex/cloud/containerregistry/v1/registry.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.containerregistry.v1"; -/** A Registry resource. For more information, see [Registry](/docs/cloud/containerregistry/registry). */ +/** A Registry resource. For more information, see the [Registry](/docs/container-registry/concepts/registry) section of the documentation. */ export interface Registry { $type: "yandex.cloud.containerregistry.v1.Registry"; /** Output only. ID of the registry. 
*/ diff --git a/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts b/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts index 832a5c39..11b240c3 100644 --- a/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts +++ b/src/generated/yandex/cloud/dataproc/manager/v1/manager_service.ts @@ -1710,9 +1710,9 @@ export const ReportReply = { messageTypeRegistry.set(ReportReply.$type, ReportReply); -/** Data Proc manager service defifnition */ +/** Data Proc manager service definition. */ export const DataprocManagerServiceService = { - /** Sends a status report from a host */ + /** Sends a status report from a host. */ report: { path: "/yandex.cloud.dataproc.manager.v1.DataprocManagerService/Report", requestStream: false, @@ -1728,12 +1728,12 @@ export const DataprocManagerServiceService = { export interface DataprocManagerServiceServer extends UntypedServiceImplementation { - /** Sends a status report from a host */ + /** Sends a status report from a host. */ report: handleUnaryCall; } export interface DataprocManagerServiceClient extends Client { - /** Sends a status report from a host */ + /** Sends a status report from a host. */ report( request: ReportRequest, callback: (error: ServiceError | null, response: ReportReply) => void diff --git a/src/generated/yandex/cloud/dataproc/v1/cluster.ts b/src/generated/yandex/cloud/dataproc/v1/cluster.ts index aaf7dd28..c477b3da 100644 --- a/src/generated/yandex/cloud/dataproc/v1/cluster.ts +++ b/src/generated/yandex/cloud/dataproc/v1/cluster.ts @@ -159,6 +159,8 @@ export interface HadoopConfig { properties: { [key: string]: string }; /** List of public SSH keys to access to cluster hosts. 
*/ sshPublicKeys: string[]; + /** Set of init-actions */ + initializationActions: InitializationAction[]; } export enum HadoopConfig_Service { @@ -286,6 +288,16 @@ export interface ClusterConfig { hadoop?: HadoopConfig; } +export interface InitializationAction { + $type: "yandex.cloud.dataproc.v1.InitializationAction"; + /** URI of the executable file */ + uri: string; + /** Arguments to the initialization action */ + args: string[]; + /** Execution timeout */ + timeout: number; +} + const baseCluster: object = { $type: "yandex.cloud.dataproc.v1.Cluster", id: "", @@ -811,6 +823,9 @@ export const HadoopConfig = { for (const v of message.sshPublicKeys) { writer.uint32(26).string(v!); } + for (const v of message.initializationActions) { + InitializationAction.encode(v!, writer.uint32(34).fork()).ldelim(); + } return writer; }, @@ -821,6 +836,7 @@ export const HadoopConfig = { message.services = []; message.properties = {}; message.sshPublicKeys = []; + message.initializationActions = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -846,6 +862,11 @@ export const HadoopConfig = { case 3: message.sshPublicKeys.push(reader.string()); break; + case 4: + message.initializationActions.push( + InitializationAction.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -868,6 +889,9 @@ export const HadoopConfig = { message.sshPublicKeys = (object.sshPublicKeys ?? []).map((e: any) => String(e) ); + message.initializationActions = (object.initializationActions ?? []).map( + (e: any) => InitializationAction.fromJSON(e) + ); return message; }, @@ -889,6 +913,13 @@ export const HadoopConfig = { } else { obj.sshPublicKeys = []; } + if (message.initializationActions) { + obj.initializationActions = message.initializationActions.map((e) => + e ? 
InitializationAction.toJSON(e) : undefined + ); + } else { + obj.initializationActions = []; + } return obj; }, @@ -906,6 +937,10 @@ export const HadoopConfig = { return acc; }, {}); message.sshPublicKeys = object.sshPublicKeys?.map((e) => e) || []; + message.initializationActions = + object.initializationActions?.map((e) => + InitializationAction.fromPartial(e) + ) || []; return message; }, }; @@ -1077,6 +1112,109 @@ export const ClusterConfig = { messageTypeRegistry.set(ClusterConfig.$type, ClusterConfig); +const baseInitializationAction: object = { + $type: "yandex.cloud.dataproc.v1.InitializationAction", + uri: "", + args: "", + timeout: 0, +}; + +export const InitializationAction = { + $type: "yandex.cloud.dataproc.v1.InitializationAction" as const, + + encode( + message: InitializationAction, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.uri !== "") { + writer.uint32(10).string(message.uri); + } + for (const v of message.args) { + writer.uint32(18).string(v!); + } + if (message.timeout !== 0) { + writer.uint32(24).int64(message.timeout); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): InitializationAction { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseInitializationAction } as InitializationAction; + message.args = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.uri = reader.string(); + break; + case 2: + message.args.push(reader.string()); + break; + case 3: + message.timeout = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): InitializationAction { + const message = { ...baseInitializationAction } as InitializationAction; + message.uri = + object.uri !== undefined && object.uri !== null ? 
String(object.uri) : ""; + message.args = (object.args ?? []).map((e: any) => String(e)); + message.timeout = + object.timeout !== undefined && object.timeout !== null + ? Number(object.timeout) + : 0; + return message; + }, + + toJSON(message: InitializationAction): unknown { + const obj: any = {}; + message.uri !== undefined && (obj.uri = message.uri); + if (message.args) { + obj.args = message.args.map((e) => e); + } else { + obj.args = []; + } + message.timeout !== undefined && + (obj.timeout = Math.round(message.timeout)); + return obj; + }, + + fromPartial, I>>( + object: I + ): InitializationAction { + const message = { ...baseInitializationAction } as InitializationAction; + message.uri = object.uri ?? ""; + message.args = object.args?.map((e) => e) || []; + message.timeout = object.timeout ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(InitializationAction.$type, InitializationAction); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + type Builtin = | Date | Function @@ -1126,6 +1264,13 @@ function fromJsonTimestamp(o: any): Date { } } +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + if (_m0.util.Long !== Long) { _m0.util.Long = Long as any; _m0.configure(); diff --git a/src/generated/yandex/cloud/dataproc/v1/common.ts b/src/generated/yandex/cloud/dataproc/v1/common.ts index 8048efa6..043839dc 100644 --- a/src/generated/yandex/cloud/dataproc/v1/common.ts +++ b/src/generated/yandex/cloud/dataproc/v1/common.ts @@ -6,13 +6,13 @@ import _m0 from 
"protobufjs/minimal"; export const protobufPackage = "yandex.cloud.dataproc.v1"; export enum Health { - /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + /** HEALTH_UNKNOWN - Object is in unknown state (we have no data). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). */ + /** ALIVE - Object is alive and well (for example, all hosts of the cluster are alive). */ ALIVE = 1, - /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ + /** DEAD - Object is inoperable (it cannot perform any of its essential functions). */ DEAD = 2, - /** DEGRADED - Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). */ + /** DEGRADED - Object is partially alive (it can perform some of its essential functions). */ DEGRADED = 3, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/dataproc/v1/subcluster.ts b/src/generated/yandex/cloud/dataproc/v1/subcluster.ts index 9df3789c..d493c53a 100644 --- a/src/generated/yandex/cloud/dataproc/v1/subcluster.ts +++ b/src/generated/yandex/cloud/dataproc/v1/subcluster.ts @@ -148,12 +148,12 @@ export interface Host { /** * Name of the Data Proc host. The host name is assigned by Data Proc at creation time * and cannot be changed. The name is generated to be unique across all existing Data Proc - * hosts in Yandex.Cloud, as it defines the FQDN of the host. + * hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the Data Proc subcluster that the host belongs to. */ subclusterId: string; - /** Host status code. */ + /** Status code of the aggregated health of the host. */ health: Health; /** ID of the Compute virtual machine that is used as the Data Proc host. 
*/ computeInstanceId: string; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts index 98251e8e..595c01ad 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/common.ts @@ -50,6 +50,50 @@ export function objectTransferStageToJSON(object: ObjectTransferStage): string { } } +export enum CleanupPolicy { + CLEANUP_POLICY_UNSPECIFIED = 0, + DISABLED = 1, + DROP = 2, + TRUNCATE = 3, + UNRECOGNIZED = -1, +} + +export function cleanupPolicyFromJSON(object: any): CleanupPolicy { + switch (object) { + case 0: + case "CLEANUP_POLICY_UNSPECIFIED": + return CleanupPolicy.CLEANUP_POLICY_UNSPECIFIED; + case 1: + case "DISABLED": + return CleanupPolicy.DISABLED; + case 2: + case "DROP": + return CleanupPolicy.DROP; + case 3: + case "TRUNCATE": + return CleanupPolicy.TRUNCATE; + case -1: + case "UNRECOGNIZED": + default: + return CleanupPolicy.UNRECOGNIZED; + } +} + +export function cleanupPolicyToJSON(object: CleanupPolicy): string { + switch (object) { + case CleanupPolicy.CLEANUP_POLICY_UNSPECIFIED: + return "CLEANUP_POLICY_UNSPECIFIED"; + case CleanupPolicy.DISABLED: + return "DISABLED"; + case CleanupPolicy.DROP: + return "DROP"; + case CleanupPolicy.TRUNCATE: + return "TRUNCATE"; + default: + return "UNKNOWN"; + } +} + export interface Secret { $type: "yandex.cloud.datatransfer.v1.endpoint.Secret"; /** Password */ diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts index 20720b44..3fa97f38 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/mysql.ts @@ -6,8 +6,11 @@ import { TLSMode, ObjectTransferStage, Secret, + CleanupPolicy, objectTransferStageFromJSON, objectTransferStageToJSON, + cleanupPolicyFromJSON, + cleanupPolicyToJSON, } from 
"../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; @@ -40,7 +43,7 @@ export interface MysqlConnection { /** * Managed cluster * - * Yandex.Cloud Managed MySQL cluster ID + * Yandex Managed Service for MySQL cluster ID */ mdbClusterId: string | undefined; /** @@ -166,6 +169,19 @@ export interface MysqlTarget { * IANA timezone database. Default: local timezone. */ timezone: string; + /** + * Cleanup policy + * + * Cleanup policy for activate, reactivate and reupload processes. Default is + * DISABLED. + */ + cleanupPolicy: CleanupPolicy; + /** + * Database schema for service table + * + * Default: db name. Here created technical tables (__tm_keeper, __tm_gtid_keeper). + */ + serviceDatabase: string; } const baseOnPremiseMysql: object = { @@ -657,6 +673,8 @@ const baseMysqlTarget: object = { sqlMode: "", skipConstraintChecks: false, timezone: "", + cleanupPolicy: 0, + serviceDatabase: "", }; export const MysqlTarget = { @@ -690,6 +708,12 @@ export const MysqlTarget = { if (message.timezone !== "") { writer.uint32(58).string(message.timezone); } + if (message.cleanupPolicy !== 0) { + writer.uint32(64).int32(message.cleanupPolicy); + } + if (message.serviceDatabase !== "") { + writer.uint32(122).string(message.serviceDatabase); + } return writer; }, @@ -721,6 +745,12 @@ export const MysqlTarget = { case 7: message.timezone = reader.string(); break; + case 8: + message.cleanupPolicy = reader.int32() as any; + break; + case 15: + message.serviceDatabase = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -760,6 +790,14 @@ export const MysqlTarget = { object.timezone !== undefined && object.timezone !== null ? String(object.timezone) : ""; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? 
cleanupPolicyFromJSON(object.cleanupPolicy) + : 0; + message.serviceDatabase = + object.serviceDatabase !== undefined && object.serviceDatabase !== null + ? String(object.serviceDatabase) + : ""; return message; }, @@ -779,6 +817,10 @@ export const MysqlTarget = { message.skipConstraintChecks !== undefined && (obj.skipConstraintChecks = message.skipConstraintChecks); message.timezone !== undefined && (obj.timezone = message.timezone); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = cleanupPolicyToJSON(message.cleanupPolicy)); + message.serviceDatabase !== undefined && + (obj.serviceDatabase = message.serviceDatabase); return obj; }, @@ -799,6 +841,8 @@ export const MysqlTarget = { message.sqlMode = object.sqlMode ?? ""; message.skipConstraintChecks = object.skipConstraintChecks ?? false; message.timezone = object.timezone ?? ""; + message.cleanupPolicy = object.cleanupPolicy ?? 0; + message.serviceDatabase = object.serviceDatabase ?? ""; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts index 036f86c4..beec5fae 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint/postgres.ts @@ -6,8 +6,11 @@ import { ObjectTransferStage, TLSMode, Secret, + CleanupPolicy, objectTransferStageFromJSON, objectTransferStageToJSON, + cleanupPolicyFromJSON, + cleanupPolicyToJSON, } from "../../../../../yandex/cloud/datatransfer/v1/endpoint/common"; export const protobufPackage = "yandex.cloud.datatransfer.v1.endpoint"; @@ -140,7 +143,7 @@ export interface PostgresConnection { /** * Managed cluster * - * Yandex.Cloud Managed PostgreSQL cluster ID + * Yandex Managed Service for PostgreSQL cluster ID */ mdbClusterId: string | undefined; /** @@ -176,15 +179,15 @@ export interface PostgresSource { /** * Included tables * - * If none or empty list is presented, all tables are 
replicated. Can contain - * regular expression. + * If none or empty list is presented, all tables are replicated. Full table name + * with schema. Can contain schema_name.* patterns. */ includeTables: string[]; /** * Excluded tables * - * If none or empty list is presented, all tables are replicated. Can contain - * regular expression. + * If none or empty list is presented, all tables are replicated. Full table name + * with schema. Can contain schema_name.* patterns. */ excludeTables: string[]; /** @@ -232,6 +235,13 @@ export interface PostgresTarget { * Password for database access. */ password?: Secret; + /** + * Cleanup policy + * + * Cleanup policy for activate, reactivate and reupload processes. Default is + * DISABLED. + */ + cleanupPolicy: CleanupPolicy; } const basePostgresObjectTransferSettings: object = { @@ -922,6 +932,7 @@ const basePostgresTarget: object = { $type: "yandex.cloud.datatransfer.v1.endpoint.PostgresTarget", database: "", user: "", + cleanupPolicy: 0, }; export const PostgresTarget = { @@ -946,6 +957,9 @@ export const PostgresTarget = { if (message.password !== undefined) { Secret.encode(message.password, writer.uint32(34).fork()).ldelim(); } + if (message.cleanupPolicy !== 0) { + writer.uint32(40).int32(message.cleanupPolicy); + } return writer; }, @@ -971,6 +985,9 @@ export const PostgresTarget = { case 4: message.password = Secret.decode(reader, reader.uint32()); break; + case 5: + message.cleanupPolicy = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -997,6 +1014,10 @@ export const PostgresTarget = { object.password !== undefined && object.password !== null ? Secret.fromJSON(object.password) : undefined; + message.cleanupPolicy = + object.cleanupPolicy !== undefined && object.cleanupPolicy !== null + ? cleanupPolicyFromJSON(object.cleanupPolicy) + : 0; return message; }, @@ -1012,6 +1033,8 @@ export const PostgresTarget = { (obj.password = message.password ? 
Secret.toJSON(message.password) : undefined); + message.cleanupPolicy !== undefined && + (obj.cleanupPolicy = cleanupPolicyToJSON(message.cleanupPolicy)); return obj; }, @@ -1029,6 +1052,7 @@ export const PostgresTarget = { object.password !== undefined && object.password !== null ? Secret.fromPartial(object.password) : undefined; + message.cleanupPolicy = object.cleanupPolicy ?? 0; return message; }, }; diff --git a/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts b/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts index 6f0910f1..d358258b 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/endpoint_service.ts @@ -37,7 +37,7 @@ export interface ListEndpointsRequest { * folder contains more endpoints than page_size, next_page_token will be included * in the response message. Include it into the subsequent ListEndpointRequest to * fetch the next page. Defaults to 100 if not specified. The maximum allowed value - * for this field is 100. + * for this field is 500. */ pageSize: number; /** diff --git a/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts b/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts index e5d9a0a1..902b07ea 100644 --- a/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts +++ b/src/generated/yandex/cloud/datatransfer/v1/transfer_service.ts @@ -96,7 +96,7 @@ export interface ListTransfersRequest { * folder contains more transfers than page_size, next_page_token will be included * in the response message. Include it into the subsequent ListTransfersRequest to * fetch the next page. Defaults to 100 if not specified. The maximum allowed value - * for this field is 100. + * for this field is 500. 
*/ pageSize: number; /** diff --git a/src/generated/yandex/cloud/iam/v1/iam_token_service.ts b/src/generated/yandex/cloud/iam/v1/iam_token_service.ts index 44afa18e..7d40e056 100644 --- a/src/generated/yandex/cloud/iam/v1/iam_token_service.ts +++ b/src/generated/yandex/cloud/iam/v1/iam_token_service.ts @@ -21,7 +21,7 @@ export const protobufPackage = "yandex.cloud.iam.v1"; export interface CreateIamTokenRequest { $type: "yandex.cloud.iam.v1.CreateIamTokenRequest"; /** - * OAuth token for a Yandex.Passport account. + * OAuth token for a Yandex account. * For more information, see [OAuth token](/docs/iam/concepts/authorization/oauth-token). */ yandexPassportOauthToken: string | undefined; diff --git a/src/generated/yandex/cloud/iam/v1/user_account.ts b/src/generated/yandex/cloud/iam/v1/user_account.ts index 80a46b9e..812d1d41 100644 --- a/src/generated/yandex/cloud/iam/v1/user_account.ts +++ b/src/generated/yandex/cloud/iam/v1/user_account.ts @@ -5,7 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.iam.v1"; -/** Currently represents only [Yandex.Passport account](/docs/iam/concepts/#passport). */ +/** Currently represents only [Yandex account](/docs/iam/concepts/#passport). */ export interface UserAccount { $type: "yandex.cloud.iam.v1.UserAccount"; /** ID of the user account. */ @@ -18,13 +18,13 @@ export interface UserAccount { /** * A YandexPassportUserAccount resource. - * For more information, see [Yandex.Passport account](/docs/iam/concepts/#passport). + * For more information, see [Yandex account](/docs/iam/concepts/#passport). */ export interface YandexPassportUserAccount { $type: "yandex.cloud.iam.v1.YandexPassportUserAccount"; - /** Login of the Yandex.Passport user account. */ + /** Login of the Yandex user account. */ login: string; - /** Default email of the Yandex.Passport user account. */ + /** Default email of the Yandex user account. 
*/ defaultEmail: string; } diff --git a/src/generated/yandex/cloud/iam/v1/user_account_service.ts b/src/generated/yandex/cloud/iam/v1/user_account_service.ts index 7fabe0ff..61d63898 100644 --- a/src/generated/yandex/cloud/iam/v1/user_account_service.ts +++ b/src/generated/yandex/cloud/iam/v1/user_account_service.ts @@ -90,7 +90,7 @@ export const GetUserAccountRequest = { messageTypeRegistry.set(GetUserAccountRequest.$type, GetUserAccountRequest); -/** A set of methods for managing user accounts. Currently applicable only for [Yandex.Passport accounts](/docs/iam/concepts/#passport). */ +/** A set of methods for managing user accounts. Currently applicable only for [Yandex accounts](/docs/iam/concepts/#passport). */ export const UserAccountServiceService = { /** Returns the specified UserAccount resource. */ get: { diff --git a/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts b/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts index 37c11e34..b3bb2098 100644 --- a/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts +++ b/src/generated/yandex/cloud/marketplace/v1/metering/image_product_usage_service.ts @@ -28,7 +28,7 @@ export interface WriteImageProductUsageRequest { validateOnly: boolean; /** Marketplace Product's ID. */ productId: string; - /** List of product usage records (up to 25 pet request). */ + /** List of product usage records (up to 25 per request). 
*/ usageRecords: UsageRecord[]; } diff --git a/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts b/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts index 47be5b54..d2b9582d 100644 --- a/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts +++ b/src/generated/yandex/cloud/marketplace/v1/metering/usage_record.ts @@ -10,9 +10,9 @@ export interface UsageRecord { $type: "yandex.cloud.marketplace.v1.metering.UsageRecord"; /** Unique identifier of the usage record (UUID format). */ uuid: string; - /** Consumed Marketplace SaaS Sku ID, linked to `UsageRecord.product_id`. */ + /** Consumed Marketplace SKU ID, linked to `UsageRecord.product_id`. */ skuId: string; - /** Quantity of sku consumed, measured in `sku.usage_unit` units (e.g. bytes). */ + /** Quantity of SKU consumed, measured in `sku.usage_unit` units (e.g. bytes). */ quantity: number; /** Timestamp in UTC for which the usage is being reported. */ timestamp?: Date; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts index 722cb265..cbcd1c38 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster.ts @@ -61,7 +61,6 @@ export interface Cluster { deletionProtection: boolean; } -/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -331,7 +330,7 @@ export interface Host { * Name of the ClickHouse host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the ClickHouse host. The ID is assigned by MDB at creation time. 
*/ @@ -554,19 +553,23 @@ export interface Access { /** Allow to export data from the cluster to Yandex DataLens. */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex.Cloud management console. + * Allow SQL queries to the cluster databases from the Yandex Cloud management console. * * See [SQL queries in the management console](/docs/managed-clickhouse/operations/web-sql-query) for more details. */ webSql: boolean; /** - * Allow to import data from Yandex.Metrica and AppMetrica to the cluster. + * Allow to import data from Yandex Metrica and AppMetrica to the cluster. * - * See [Export data to Yandex.Cloud](https://appmetrica.yandex.com/docs/cloud/index.html) for more details. + * See [Export data to Yandex Cloud](https://appmetrica.yandex.com/docs/cloud/index.html) for more details. */ metrika: boolean; /** Allow access to cluster for Serverless. */ serverless: boolean; + /** Allow access for DataTransfer */ + dataTransfer: boolean; + /** Allow access for YandexQuery */ + yandexQuery: boolean; } export interface CloudStorage { @@ -2217,6 +2220,8 @@ const baseAccess: object = { webSql: false, metrika: false, serverless: false, + dataTransfer: false, + yandexQuery: false, }; export const Access = { @@ -2238,6 +2243,12 @@ export const Access = { if (message.serverless === true) { writer.uint32(32).bool(message.serverless); } + if (message.dataTransfer === true) { + writer.uint32(40).bool(message.dataTransfer); + } + if (message.yandexQuery === true) { + writer.uint32(48).bool(message.yandexQuery); + } return writer; }, @@ -2260,6 +2271,12 @@ export const Access = { case 4: message.serverless = reader.bool(); break; + case 5: + message.dataTransfer = reader.bool(); + break; + case 6: + message.yandexQuery = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -2286,6 +2303,14 @@ export const Access = { object.serverless !== undefined && object.serverless !== null ? 
Boolean(object.serverless) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; + message.yandexQuery = + object.yandexQuery !== undefined && object.yandexQuery !== null + ? Boolean(object.yandexQuery) + : false; return message; }, @@ -2295,6 +2320,10 @@ export const Access = { message.webSql !== undefined && (obj.webSql = message.webSql); message.metrika !== undefined && (obj.metrika = message.metrika); message.serverless !== undefined && (obj.serverless = message.serverless); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); + message.yandexQuery !== undefined && + (obj.yandexQuery = message.yandexQuery); return obj; }, @@ -2304,6 +2333,8 @@ export const Access = { message.webSql = object.webSql ?? false; message.metrika = object.metrika ?? false; message.serverless = object.serverless ?? false; + message.dataTransfer = object.dataTransfer ?? false; + message.yandexQuery = object.yandexQuery ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts index d8f3bad3..aad41db8 100644 --- a/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/clickhouse/v1/cluster_service.ts @@ -699,6 +699,38 @@ export interface AddClusterHostsMetadata { hostNames: string[]; } +export interface UpdateHostSpec { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateHostSpec"; + /** + * Name of the host to update. + * To get the ClickHouse host name, use a [ClusterService.ListHosts] request. + */ + hostName: string; + /** Field mask that specifies which fields of the ClickHouse host should be updated. */ + updateMask?: FieldMask; + /** Whether the host should get a public IP address on creation. 
*/ + assignPublicIp?: boolean; +} + +export interface UpdateClusterHostsRequest { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsRequest"; + /** + * ID of the ClickHouse cluster to update hosts in. + * To get the ClickHouse cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** New configurations to apply to hosts. */ + updateHostSpecs: UpdateHostSpec[]; +} + +export interface UpdateClusterHostsMetadata { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsMetadata"; + /** ID of the ClickHouse cluster to modify hosts in. */ + clusterId: string; + /** Names of hosts that are being modified. */ + hostNames: string[]; +} + export interface DeleteClusterHostsRequest { $type: "yandex.cloud.mdb.clickhouse.v1.DeleteClusterHostsRequest"; /** @@ -5114,6 +5146,289 @@ export const AddClusterHostsMetadata = { messageTypeRegistry.set(AddClusterHostsMetadata.$type, AddClusterHostsMetadata); +const baseUpdateHostSpec: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateHostSpec", + hostName: "", +}; + +export const UpdateHostSpec = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateHostSpec" as const, + + encode( + message: UpdateHostSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hostName !== "") { + writer.uint32(10).string(message.hostName); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(18).fork()).ldelim(); + } + if (message.assignPublicIp !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.assignPublicIp! }, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateHostSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hostName = reader.string(); + break; + case 2: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + case 3: + message.assignPublicIp = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = + object.hostName !== undefined && object.hostName !== null + ? String(object.hostName) + : ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : undefined; + return message; + }, + + toJSON(message: UpdateHostSpec): unknown { + const obj: any = {}; + message.hostName !== undefined && (obj.hostName = message.hostName); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = object.hostName ?? ""; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromPartial(object.updateMask) + : undefined; + message.assignPublicIp = object.assignPublicIp ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateHostSpec.$type, UpdateHostSpec); + +const baseUpdateClusterHostsRequest: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsRequest", + clusterId: "", +}; + +export const UpdateClusterHostsRequest = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsRequest" as const, + + encode( + message: UpdateClusterHostsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.updateHostSpecs) { + UpdateHostSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.updateHostSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.updateHostSpecs.push( + UpdateHostSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.updateHostSpecs = (object.updateHostSpecs ?? 
[]).map((e: any) => + UpdateHostSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateClusterHostsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.updateHostSpecs) { + obj.updateHostSpecs = message.updateHostSpecs.map((e) => + e ? UpdateHostSpec.toJSON(e) : undefined + ); + } else { + obj.updateHostSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = object.clusterId ?? ""; + message.updateHostSpecs = + object.updateHostSpecs?.map((e) => UpdateHostSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsRequest.$type, + UpdateClusterHostsRequest +); + +const baseUpdateClusterHostsMetadata: object = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsMetadata", + clusterId: "", + hostNames: "", +}; + +export const UpdateClusterHostsMetadata = { + $type: "yandex.cloud.mdb.clickhouse.v1.UpdateClusterHostsMetadata" as const, + + encode( + message: UpdateClusterHostsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.hostNames) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.hostNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.hostNames.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.hostNames = (object.hostNames ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: UpdateClusterHostsMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.hostNames) { + obj.hostNames = message.hostNames.map((e) => e); + } else { + obj.hostNames = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = object.clusterId ?? ""; + message.hostNames = object.hostNames?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsMetadata.$type, + UpdateClusterHostsMetadata +); + const baseDeleteClusterHostsRequest: object = { $type: "yandex.cloud.mdb.clickhouse.v1.DeleteClusterHostsRequest", clusterId: "", @@ -8352,6 +8667,19 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates the specified hosts. 
*/ + updateHosts: { + path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/UpdateHosts", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterHostsRequest) => + Buffer.from(UpdateClusterHostsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateClusterHostsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Deletes the specified hosts for a cluster. */ deleteHosts: { path: "/yandex.cloud.mdb.clickhouse.v1.ClusterService/DeleteHosts", @@ -8583,6 +8911,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { listHosts: handleUnaryCall; /** Creates new hosts for a cluster. */ addHosts: handleUnaryCall; + /** Updates the specified hosts. */ + updateHosts: handleUnaryCall; /** Deletes the specified hosts for a cluster. */ deleteHosts: handleUnaryCall; /** Returns the specified shard. */ @@ -8958,6 +9288,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates the specified hosts. */ + updateHosts( + request: UpdateClusterHostsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Deletes the specified hosts for a cluster. 
*/ deleteHosts( request: DeleteClusterHostsRequest, diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts new file mode 100644 index 00000000..a9cae4b8 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup.ts @@ -0,0 +1,280 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface Backup { + $type: "yandex.cloud.mdb.elasticsearch.v1.Backup"; + /** Required. ID of the backup. */ + id: string; + /** ID of the folder that the backup belongs to. */ + folderId: string; + /** ID of the associated Elasticsearch cluster. */ + sourceClusterId: string; + /** The time when the backup operation was started. */ + startedAt?: Date; + /** The time when the backup was created (i.e. when the backup operation completed). */ + createdAt?: Date; + /** Indices names. (max 100) */ + indices: string[]; + /** Elasticsearch version used to create the snapshot */ + elasticsearchVersion: string; + /** Total size of all indices in backup. 
in bytes */ + sizeBytes: number; + /** Total count of indices in backup */ + indicesTotal: number; +} + +const baseBackup: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Backup", + id: "", + folderId: "", + sourceClusterId: "", + indices: "", + elasticsearchVersion: "", + sizeBytes: 0, + indicesTotal: 0, +}; + +export const Backup = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Backup" as const, + + encode( + message: Backup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.sourceClusterId !== "") { + writer.uint32(26).string(message.sourceClusterId); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(42).fork() + ).ldelim(); + } + for (const v of message.indices) { + writer.uint32(50).string(v!); + } + if (message.elasticsearchVersion !== "") { + writer.uint32(58).string(message.elasticsearchVersion); + } + if (message.sizeBytes !== 0) { + writer.uint32(64).int64(message.sizeBytes); + } + if (message.indicesTotal !== 0) { + writer.uint32(72).int64(message.indicesTotal); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Backup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBackup } as Backup; + message.indices = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.sourceClusterId = reader.string(); + break; + case 4: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.indices.push(reader.string()); + break; + case 7: + message.elasticsearchVersion = reader.string(); + break; + case 8: + message.sizeBytes = longToNumber(reader.int64() as Long); + break; + case 9: + message.indicesTotal = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup { + const message = { ...baseBackup } as Backup; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.sourceClusterId = + object.sourceClusterId !== undefined && object.sourceClusterId !== null + ? String(object.sourceClusterId) + : ""; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.indices = (object.indices ?? []).map((e: any) => String(e)); + message.elasticsearchVersion = + object.elasticsearchVersion !== undefined && + object.elasticsearchVersion !== null + ? String(object.elasticsearchVersion) + : ""; + message.sizeBytes = + object.sizeBytes !== undefined && object.sizeBytes !== null + ? 
Number(object.sizeBytes) + : 0; + message.indicesTotal = + object.indicesTotal !== undefined && object.indicesTotal !== null + ? Number(object.indicesTotal) + : 0; + return message; + }, + + toJSON(message: Backup): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.sourceClusterId !== undefined && + (obj.sourceClusterId = message.sourceClusterId); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + if (message.indices) { + obj.indices = message.indices.map((e) => e); + } else { + obj.indices = []; + } + message.elasticsearchVersion !== undefined && + (obj.elasticsearchVersion = message.elasticsearchVersion); + message.sizeBytes !== undefined && + (obj.sizeBytes = Math.round(message.sizeBytes)); + message.indicesTotal !== undefined && + (obj.indicesTotal = Math.round(message.indicesTotal)); + return obj; + }, + + fromPartial, I>>(object: I): Backup { + const message = { ...baseBackup } as Backup; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.sourceClusterId = object.sourceClusterId ?? ""; + message.startedAt = object.startedAt ?? undefined; + message.createdAt = object.createdAt ?? undefined; + message.indices = object.indices?.map((e) => e) || []; + message.elasticsearchVersion = object.elasticsearchVersion ?? ""; + message.sizeBytes = object.sizeBytes ?? 0; + message.indicesTotal = object.indicesTotal ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Backup.$type, Backup); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts new file mode 100644 index 
00000000..f0f875da --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/backup_service.ts @@ -0,0 +1,429 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Backup } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/backup"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface GetBackupRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetBackupRequest"; + /** Required. ID of the backup to return. */ + backupId: string; +} + +export interface ListBackupsRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsRequest"; + /** Required. ID of the folder to list backups in. */ + folderId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListBackupsResponse { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for ListBackups requests, + * if the number of results is larger than `page_size` specified in the request. 
+ * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups + * requests will have their own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetBackupRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetBackupRequest", + backupId: "", +}; + +export const GetBackupRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetBackupRequest" as const, + + encode( + message: GetBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetBackupRequest } as GetBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: GetBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = object.backupId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(GetBackupRequest.$type, GetBackupRequest); + +const baseListBackupsRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListBackupsRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsRequest" as const, + + encode( + message: ListBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsRequest.$type, ListBackupsRequest); + +const baseListBackupsResponse: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsResponse", + nextPageToken: "", +}; + +export const ListBackupsResponse = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListBackupsResponse" as const, + + encode( + message: ListBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); + +export const BackupServiceService = { + /** Returns the specified backup of Elasticsearch cluster. 
*/ + get: { + path: "/yandex.cloud.mdb.elasticsearch.v1.BackupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBackupRequest) => + Buffer.from(GetBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBackupRequest.decode(value), + responseSerialize: (value: Backup) => + Buffer.from(Backup.encode(value).finish()), + responseDeserialize: (value: Buffer) => Backup.decode(value), + }, + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + list: { + path: "/yandex.cloud.mdb.elasticsearch.v1.BackupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBackupsRequest) => + Buffer.from(ListBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBackupsRequest.decode(value), + responseSerialize: (value: ListBackupsResponse) => + Buffer.from(ListBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), + }, +} as const; + +export interface BackupServiceServer extends UntypedServiceImplementation { + /** Returns the specified backup of Elasticsearch cluster. */ + get: handleUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + list: handleUnaryCall; +} + +export interface BackupServiceClient extends Client { + /** Returns the specified backup of Elasticsearch cluster. */ + get( + request: GetBackupRequest, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. 
*/ + list( + request: ListBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; +} + +export const BackupServiceClient = makeGenericClientConstructor( + BackupServiceService, + "yandex.cloud.mdb.elasticsearch.v1.BackupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BackupServiceClient; + service: typeof BackupServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts index c205bc51..2c504528 100644 --- a/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/cluster_service.ts @@ -31,8 +31,10 @@ import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/elasticsearch import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { UserSpec } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/user"; +import { ExtensionSpec } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/extension"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { ElasticsearchConfig7 } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/config/elasticsearch"; +import { Backup } from "../../../../../yandex/cloud/mdb/elasticsearch/v1/backup"; export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; @@ -123,6 +125,8 @@ export interface CreateClusterRequest { deletionProtection: boolean; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; + /** optional */ + extensionSpecs: ExtensionSpec[]; } export interface CreateClusterRequest_LabelsEntry { @@ -721,6 +725,97 @@ export interface RescheduleMaintenanceMetadata { delayedUntil?: Date; } +export interface RestoreClusterRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest"; + /** Required. ID of the backup to restore from. 
*/ + backupId: string; + /** Name of the ElasticSearch cluster. The name must be unique within the folder. */ + name: string; + /** Description of the ElasticSearch cluster. */ + description: string; + /** + * Custom labels for the ElasticSearch cluster as `` key:value `` pairs. Maximum 64 per resource. + * For example, "project": "mvp" or "source": "dictionary". + */ + labels: { [key: string]: string }; + /** Deployment environment of the ElasticSearch cluster. */ + environment: Cluster_Environment; + /** Configuration and resources for hosts that should be created for the ElasticSearch cluster. */ + configSpec?: ConfigSpec; + /** Required. Configuration of ElasticSearch hosts. */ + hostSpecs: HostSpec[]; + /** ID of the network to create the cluster in. */ + networkId: string; + /** User security groups */ + securityGroupIds: string[]; + /** ID of the service account used for access to Yandex Object Storage. */ + serviceAccountId: string; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** ID of the folder to create the ElasticSearch cluster in. */ + folderId: string; + /** optional */ + extensionSpecs: ExtensionSpec[]; +} + +export interface RestoreClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface RestoreClusterMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterMetadata"; + /** Required. ID of the new ElasticSearch cluster. */ + clusterId: string; + /** Required. ID of the backup used for recovery. */ + backupId: string; +} + +export interface BackupClusterRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterRequest"; + /** Required. ID of the ElasticSearch cluster to back up. */ + clusterId: string; +} + +export interface BackupClusterMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterMetadata"; + /** ID of the ElasticSearch cluster. 
*/ + clusterId: string; +} + +export interface ListClusterBackupsRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsRequest"; + /** Required. ID of the Elasticsearch cluster. */ + clusterId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListClusterBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListClusterBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListClusterBackupsResponse { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for ListClusterBackups requests, + * if the number of results is larger than `page_size` specified in the request. + * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListClusterBackups request. Subsequent ListClusterBackups + * requests will have their own `next_page_token` to continue paging through the results. 
+ */ + nextPageToken: string; +} + const baseGetClusterRequest: object = { $type: "yandex.cloud.mdb.elasticsearch.v1.GetClusterRequest", clusterId: "", @@ -1039,6 +1134,9 @@ export const CreateClusterRequest = { writer.uint32(114).fork() ).ldelim(); } + for (const v of message.extensionSpecs) { + ExtensionSpec.encode(v!, writer.uint32(122).fork()).ldelim(); + } return writer; }, @@ -1053,6 +1151,7 @@ export const CreateClusterRequest = { message.userSpecs = []; message.hostSpecs = []; message.securityGroupIds = []; + message.extensionSpecs = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1104,6 +1203,11 @@ export const CreateClusterRequest = { reader.uint32() ); break; + case 15: + message.extensionSpecs.push( + ExtensionSpec.decode(reader, reader.uint32()) + ); + break; default: reader.skipType(tag & 7); break; @@ -1167,6 +1271,9 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromJSON(object.maintenanceWindow) : undefined; + message.extensionSpecs = (object.extensionSpecs ?? []).map((e: any) => + ExtensionSpec.fromJSON(e) + ); return message; }, @@ -1216,6 +1323,13 @@ export const CreateClusterRequest = { (obj.maintenanceWindow = message.maintenanceWindow ? MaintenanceWindow.toJSON(message.maintenanceWindow) : undefined); + if (message.extensionSpecs) { + obj.extensionSpecs = message.extensionSpecs.map((e) => + e ? ExtensionSpec.toJSON(e) : undefined + ); + } else { + obj.extensionSpecs = []; + } return obj; }, @@ -1252,6 +1366,8 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromPartial(object.maintenanceWindow) : undefined; + message.extensionSpecs = + object.extensionSpecs?.map((e) => ExtensionSpec.fromPartial(e)) || []; return message; }, }; @@ -4635,95 +4751,861 @@ messageTypeRegistry.set( RescheduleMaintenanceMetadata ); -/** A set of methods for managing Elasticsearch clusters. 
*/ -export const ClusterServiceService = { - /** - * Returns the specified Elasticsearch cluster. - * - * To get the list of available Elasticsearch clusters, make a [List] request. - */ - get: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Get", - requestStream: false, - responseStream: false, - requestSerialize: (value: GetClusterRequest) => - Buffer.from(GetClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), - responseSerialize: (value: Cluster) => - Buffer.from(Cluster.encode(value).finish()), - responseDeserialize: (value: Buffer) => Cluster.decode(value), - }, - /** Retrieves the list of Elasticsearch clusters that belong to the specified folder. */ - list: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/List", - requestStream: false, - responseStream: false, - requestSerialize: (value: ListClustersRequest) => - Buffer.from(ListClustersRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), - responseSerialize: (value: ListClustersResponse) => - Buffer.from(ListClustersResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), - }, - /** Creates a new Elasticsearch cluster in the specified folder. */ - create: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Create", - requestStream: false, - responseStream: false, - requestSerialize: (value: CreateClusterRequest) => - Buffer.from(CreateClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => CreateClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), - }, - /** Updates the specified Elasticsearch cluster. 
*/ - update: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Update", - requestStream: false, - responseStream: false, - requestSerialize: (value: UpdateClusterRequest) => - Buffer.from(UpdateClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => UpdateClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), - }, - /** Deletes the specified Elasticsearch cluster. */ - delete: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Delete", - requestStream: false, - responseStream: false, - requestSerialize: (value: DeleteClusterRequest) => - Buffer.from(DeleteClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => DeleteClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), +const baseRestoreClusterRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest", + backupId: "", + name: "", + description: "", + environment: 0, + networkId: "", + securityGroupIds: "", + serviceAccountId: "", + deletionProtection: false, + folderId: "", +}; + +export const RestoreClusterRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest" as const, + + encode( + message: RestoreClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.description !== "") { + writer.uint32(26).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + RestoreClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry", + key: key as any, + 
value, + }, + writer.uint32(34).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(40).int32(message.environment); + } + if (message.configSpec !== undefined) { + ConfigSpec.encode(message.configSpec, writer.uint32(50).fork()).ldelim(); + } + for (const v of message.hostSpecs) { + HostSpec.encode(v!, writer.uint32(74).fork()).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(82).string(message.networkId); + } + for (const v of message.securityGroupIds) { + writer.uint32(90).string(v!); + } + if (message.serviceAccountId !== "") { + writer.uint32(98).string(message.serviceAccountId); + } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } + if (message.folderId !== "") { + writer.uint32(114).string(message.folderId); + } + for (const v of message.extensionSpecs) { + ExtensionSpec.encode(v!, writer.uint32(122).fork()).ldelim(); + } + return writer; }, - /** Moves the specified Elasticsearch cluster to the specified folder. */ - move: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Move", - requestStream: false, - responseStream: false, - requestSerialize: (value: MoveClusterRequest) => - Buffer.from(MoveClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => MoveClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.labels = {}; + message.hostSpecs = []; + message.securityGroupIds = []; + message.extensionSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.description = reader.string(); + break; + case 4: + const entry4 = RestoreClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry4.value !== undefined) { + message.labels[entry4.key] = entry4.value; + } + break; + case 5: + message.environment = reader.int32() as any; + break; + case 6: + message.configSpec = ConfigSpec.decode(reader, reader.uint32()); + break; + case 9: + message.hostSpecs.push(HostSpec.decode(reader, reader.uint32())); + break; + case 10: + message.networkId = reader.string(); + break; + case 11: + message.securityGroupIds.push(reader.string()); + break; + case 12: + message.serviceAccountId = reader.string(); + break; + case 13: + message.deletionProtection = reader.bool(); + break; + case 14: + message.folderId = reader.string(); + break; + case 15: + message.extensionSpecs.push( + ExtensionSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; }, - /** Starts the specified Elasticsearch cluster. 
*/ - start: { - path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Start", - requestStream: false, - responseStream: false, - requestSerialize: (value: StartClusterRequest) => - Buffer.from(StartClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => StartClusterRequest.decode(value), - responseSerialize: (value: Operation) => - Buffer.from(Operation.encode(value).finish()), - responseDeserialize: (value: Buffer) => Operation.decode(value), + + fromJSON(object: any): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromJSON(object.configSpec) + : undefined; + message.hostSpecs = (object.hostSpecs ?? []).map((e: any) => + HostSpec.fromJSON(e) + ); + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.serviceAccountId = + object.serviceAccountId !== undefined && object.serviceAccountId !== null + ? String(object.serviceAccountId) + : ""; + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? 
Boolean(object.deletionProtection) + : false; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.extensionSpecs = (object.extensionSpecs ?? []).map((e: any) => + ExtensionSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: RestoreClusterRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? ConfigSpec.toJSON(message.configSpec) + : undefined); + if (message.hostSpecs) { + obj.hostSpecs = message.hostSpecs.map((e) => + e ? HostSpec.toJSON(e) : undefined + ); + } else { + obj.hostSpecs = []; + } + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.serviceAccountId !== undefined && + (obj.serviceAccountId = message.serviceAccountId); + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + message.folderId !== undefined && (obj.folderId = message.folderId); + if (message.extensionSpecs) { + obj.extensionSpecs = message.extensionSpecs.map((e) => + e ? ExtensionSpec.toJSON(e) : undefined + ); + } else { + obj.extensionSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = object.backupId ?? 
""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromPartial(object.configSpec) + : undefined; + message.hostSpecs = + object.hostSpecs?.map((e) => HostSpec.fromPartial(e)) || []; + message.networkId = object.networkId ?? ""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.serviceAccountId = object.serviceAccountId ?? ""; + message.deletionProtection = object.deletionProtection ?? false; + message.folderId = object.folderId ?? ""; + message.extensionSpecs = + object.extensionSpecs?.map((e) => ExtensionSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterRequest.$type, RestoreClusterRequest); + +const baseRestoreClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const RestoreClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterRequest.LabelsEntry" as const, + + encode( + message: RestoreClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: RestoreClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + RestoreClusterRequest_LabelsEntry.$type, + RestoreClusterRequest_LabelsEntry +); + +const baseRestoreClusterMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterMetadata", + clusterId: "", + backupId: "", +}; + +export const RestoreClusterMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.RestoreClusterMetadata" as const, + + encode( + message: RestoreClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? 
String(object.backupId) + : ""; + return message; + }, + + toJSON(message: RestoreClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterMetadata.$type, RestoreClusterMetadata); + +const baseBackupClusterRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterRequest", + clusterId: "", +}; + +export const BackupClusterRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterRequest" as const, + + encode( + message: BackupClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackupClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupClusterRequest { + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: BackupClusterRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupClusterRequest { + const message = { ...baseBackupClusterRequest } as BackupClusterRequest; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(BackupClusterRequest.$type, BackupClusterRequest); + +const baseBackupClusterMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterMetadata", + clusterId: "", +}; + +export const BackupClusterMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.BackupClusterMetadata" as const, + + encode( + message: BackupClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): BackupClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): BackupClusterMetadata { + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + return message; + }, + + toJSON(message: BackupClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + return obj; + }, + + fromPartial, I>>( + object: I + ): BackupClusterMetadata { + const message = { ...baseBackupClusterMetadata } as BackupClusterMetadata; + message.clusterId = object.clusterId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(BackupClusterMetadata.$type, BackupClusterMetadata); + +const baseListClusterBackupsRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterBackupsRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsRequest" as const, + + encode( + message: ListClusterBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsRequest.$type, + ListClusterBackupsRequest +); + +const baseListClusterBackupsResponse: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsResponse", + nextPageToken: "", +}; + +export const ListClusterBackupsResponse = { + $type: + "yandex.cloud.mdb.elasticsearch.v1.ListClusterBackupsResponse" as const, + + encode( + message: ListClusterBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? 
Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsResponse.$type, + ListClusterBackupsResponse +); + +/** A set of methods for managing Elasticsearch clusters. */ +export const ClusterServiceService = { + /** + * Returns the specified Elasticsearch cluster. + * + * To get the list of available Elasticsearch clusters, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetClusterRequest) => + Buffer.from(GetClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), + responseSerialize: (value: Cluster) => + Buffer.from(Cluster.encode(value).finish()), + responseDeserialize: (value: Buffer) => Cluster.decode(value), + }, + /** Retrieves the list of Elasticsearch clusters that belong to the specified folder. */ + list: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClustersRequest) => + Buffer.from(ListClustersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), + responseSerialize: (value: ListClustersResponse) => + Buffer.from(ListClustersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), + }, + /** Creates a new Elasticsearch cluster in the specified folder. 
*/ + create: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateClusterRequest) => + Buffer.from(CreateClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified Elasticsearch cluster. */ + update: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterRequest) => + Buffer.from(UpdateClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified Elasticsearch cluster. */ + delete: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteClusterRequest) => + Buffer.from(DeleteClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Moves the specified Elasticsearch cluster to the specified folder. 
*/ + move: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Move", + requestStream: false, + responseStream: false, + requestSerialize: (value: MoveClusterRequest) => + Buffer.from(MoveClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => MoveClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Starts the specified Elasticsearch cluster. */ + start: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Start", + requestStream: false, + responseStream: false, + requestSerialize: (value: StartClusterRequest) => + Buffer.from(StartClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => StartClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), }, /** Stops the specified Elasticsearch cluster. */ stop: { @@ -4737,6 +5619,44 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Create a backup for the specified ElasticSearch cluster. */ + backup: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Backup", + requestStream: false, + responseStream: false, + requestSerialize: (value: BackupClusterRequest) => + Buffer.from(BackupClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => BackupClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Returns the list of available backups for the specified Elasticsearch cluster. 
*/ + listBackups: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/ListBackups", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterBackupsRequest) => + Buffer.from(ListClusterBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterBackupsRequest.decode(value), + responseSerialize: (value: ListClusterBackupsResponse) => + Buffer.from(ListClusterBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterBackupsResponse.decode(value), + }, + /** Creates a new ElasticSearch cluster from the specified backup. */ + restore: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ClusterService/Restore", + requestStream: false, + responseStream: false, + requestSerialize: (value: RestoreClusterRequest) => + Buffer.from(RestoreClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RestoreClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** * Retrieves logs for the specified Elasticsearch cluster. * @@ -4856,6 +5776,15 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { start: handleUnaryCall; /** Stops the specified Elasticsearch cluster. */ stop: handleUnaryCall; + /** Create a backup for the specified ElasticSearch cluster. */ + backup: handleUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + listBackups: handleUnaryCall< + ListClusterBackupsRequest, + ListClusterBackupsResponse + >; + /** Creates a new ElasticSearch cluster from the specified backup. */ + restore: handleUnaryCall; /** * Retrieves logs for the specified Elasticsearch cluster. 
* @@ -5027,6 +5956,63 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Create a backup for the specified ElasticSearch cluster. */ + backup( + request: BackupClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + backup( + request: BackupClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + backup( + request: BackupClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified Elasticsearch cluster. */ + listBackups( + request: ListClusterBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + /** Creates a new ElasticSearch cluster from the specified backup. */ + restore( + request: RestoreClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** * Retrieves logs for the specified Elasticsearch cluster. 
* diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts new file mode 100644 index 00000000..88c90fc8 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension.ts @@ -0,0 +1,281 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface Extension { + $type: "yandex.cloud.mdb.elasticsearch.v1.Extension"; + /** Name of the extension. */ + name: string; + /** Extension unique ID */ + id: string; + /** ID of the Elasticsearch cluster the extension belongs to. */ + clusterId: string; + /** Extension version */ + version: number; + /** Flag is extension active now */ + active: boolean; +} + +export interface ExtensionSpec { + $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec"; + /** Name of the extension. */ + name: string; + /** + * URI of the zip arhive to create the new extension from. + * Currently only supports links that are stored in Yandex Object Storage. 
+ */ + uri: string; + disabled: boolean; +} + +const baseExtension: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Extension", + name: "", + id: "", + clusterId: "", + version: 0, + active: false, +}; + +export const Extension = { + $type: "yandex.cloud.mdb.elasticsearch.v1.Extension" as const, + + encode( + message: Extension, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.id !== "") { + writer.uint32(18).string(message.id); + } + if (message.clusterId !== "") { + writer.uint32(26).string(message.clusterId); + } + if (message.version !== 0) { + writer.uint32(32).int64(message.version); + } + if (message.active === true) { + writer.uint32(40).bool(message.active); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Extension { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseExtension } as Extension; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.id = reader.string(); + break; + case 3: + message.clusterId = reader.string(); + break; + case 4: + message.version = longToNumber(reader.int64() as Long); + break; + case 5: + message.active = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Extension { + const message = { ...baseExtension } as Extension; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? 
String(object.clusterId) + : ""; + message.version = + object.version !== undefined && object.version !== null + ? Number(object.version) + : 0; + message.active = + object.active !== undefined && object.active !== null + ? Boolean(object.active) + : false; + return message; + }, + + toJSON(message: Extension): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.id !== undefined && (obj.id = message.id); + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.version !== undefined && + (obj.version = Math.round(message.version)); + message.active !== undefined && (obj.active = message.active); + return obj; + }, + + fromPartial, I>>( + object: I + ): Extension { + const message = { ...baseExtension } as Extension; + message.name = object.name ?? ""; + message.id = object.id ?? ""; + message.clusterId = object.clusterId ?? ""; + message.version = object.version ?? 0; + message.active = object.active ?? false; + return message; + }, +}; + +messageTypeRegistry.set(Extension.$type, Extension); + +const baseExtensionSpec: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec", + name: "", + uri: "", + disabled: false, +}; + +export const ExtensionSpec = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ExtensionSpec" as const, + + encode( + message: ExtensionSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.uri !== "") { + writer.uint32(18).string(message.uri); + } + if (message.disabled === true) { + writer.uint32(24).bool(message.disabled); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ExtensionSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseExtensionSpec } as ExtensionSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.uri = reader.string(); + break; + case 3: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ExtensionSpec { + const message = { ...baseExtensionSpec } as ExtensionSpec; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.uri = + object.uri !== undefined && object.uri !== null ? String(object.uri) : ""; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: ExtensionSpec): unknown { + const obj: any = {}; + message.name !== undefined && (obj.name = message.name); + message.uri !== undefined && (obj.uri = message.uri); + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): ExtensionSpec { + const message = { ...baseExtensionSpec } as ExtensionSpec; + message.name = object.name ?? ""; + message.uri = object.uri ?? ""; + message.disabled = object.disabled ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(ExtensionSpec.$type, ExtensionSpec); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts new file mode 100644 index 00000000..5cdf27a1 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/elasticsearch/v1/extension_service.ts @@ -0,0 +1,1127 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Extension } from 
"../../../../../yandex/cloud/mdb/elasticsearch/v1/extension"; +import { Operation } from "../../../../../yandex/cloud/operation/operation"; + +export const protobufPackage = "yandex.cloud.mdb.elasticsearch.v1"; + +export interface GetExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to return. */ + extensionId: string; +} + +export interface ListExtensionsRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest"; + /** Required. ID of the cluster to list extensions in. */ + clusterId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListExtensionsResponse { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsResponse"; + /** Requested list of extensions. */ + extensions: Extension[]; + /** + * This token allows you to get the next page of results for ListBackups requests, + * if the number of results is larger than `page_size` specified in the request. + * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups + * requests will have their own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +export interface DeleteExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest"; + /** Required. ID of the cluster. 
*/ + clusterId: string; + /** Required. ID of the extension to delete. */ + extensionId: string; +} + +export interface DeleteExtensionMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to delete. */ + extensionId: string; +} + +export interface UpdateExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension to delete. */ + extensionId: string; + active: boolean; +} + +export interface UpdateExtensionMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension. */ + extensionId: string; +} + +export interface CreateExtensionRequest { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Name of the extension. */ + name: string; + /** + * URI of the zip arhive to create the new extension from. + * Currently only supports links that are stored in Yandex Object Storage. + */ + uri: string; + disabled: boolean; +} + +export interface CreateExtensionMetadata { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata"; + /** Required. ID of the cluster. */ + clusterId: string; + /** Required. ID of the extension. 
*/ + extensionId: string; +} + +const baseGetExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest", + clusterId: "", + extensionId: "", +}; + +export const GetExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.GetExtensionRequest" as const, + + encode( + message: GetExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseGetExtensionRequest } as GetExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetExtensionRequest { + const message = { ...baseGetExtensionRequest } as GetExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: GetExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetExtensionRequest { + const message = { ...baseGetExtensionRequest } as GetExtensionRequest; + message.clusterId = object.clusterId ?? 
""; + message.extensionId = object.extensionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetExtensionRequest.$type, GetExtensionRequest); + +const baseListExtensionsRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListExtensionsRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsRequest" as const, + + encode( + message: ListExtensionsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListExtensionsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListExtensionsRequest } as ListExtensionsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListExtensionsRequest { + const message = { ...baseListExtensionsRequest } as ListExtensionsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListExtensionsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListExtensionsRequest { + const message = { ...baseListExtensionsRequest } as ListExtensionsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListExtensionsRequest.$type, ListExtensionsRequest); + +const baseListExtensionsResponse: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsResponse", + nextPageToken: "", +}; + +export const ListExtensionsResponse = { + $type: "yandex.cloud.mdb.elasticsearch.v1.ListExtensionsResponse" as const, + + encode( + message: ListExtensionsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.extensions) { + Extension.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListExtensionsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListExtensionsResponse } as ListExtensionsResponse; + message.extensions = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.extensions.push(Extension.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListExtensionsResponse { + const message = { ...baseListExtensionsResponse } as ListExtensionsResponse; + message.extensions = (object.extensions ?? []).map((e: any) => + Extension.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListExtensionsResponse): unknown { + const obj: any = {}; + if (message.extensions) { + obj.extensions = message.extensions.map((e) => + e ? Extension.toJSON(e) : undefined + ); + } else { + obj.extensions = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListExtensionsResponse { + const message = { ...baseListExtensionsResponse } as ListExtensionsResponse; + message.extensions = + object.extensions?.map((e) => Extension.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListExtensionsResponse.$type, ListExtensionsResponse); + +const baseDeleteExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest", + clusterId: "", + extensionId: "", +}; + +export const DeleteExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionRequest" as const, + + encode( + message: DeleteExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseDeleteExtensionRequest } as DeleteExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteExtensionRequest { + const message = { ...baseDeleteExtensionRequest } as DeleteExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? 
String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: DeleteExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteExtensionRequest { + const message = { ...baseDeleteExtensionRequest } as DeleteExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteExtensionRequest.$type, DeleteExtensionRequest); + +const baseDeleteExtensionMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata", + clusterId: "", + extensionId: "", +}; + +export const DeleteExtensionMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.DeleteExtensionMetadata" as const, + + encode( + message: DeleteExtensionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): DeleteExtensionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseDeleteExtensionMetadata, + } as DeleteExtensionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): DeleteExtensionMetadata { + const message = { + ...baseDeleteExtensionMetadata, + } as DeleteExtensionMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: DeleteExtensionMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): DeleteExtensionMetadata { + const message = { + ...baseDeleteExtensionMetadata, + } as DeleteExtensionMetadata; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(DeleteExtensionMetadata.$type, DeleteExtensionMetadata); + +const baseUpdateExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest", + clusterId: "", + extensionId: "", + active: false, +}; + +export const UpdateExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionRequest" as const, + + encode( + message: UpdateExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + if (message.active === true) { + writer.uint32(24).bool(message.active); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateExtensionRequest } as UpdateExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + case 3: + message.active = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateExtensionRequest { + const message = { ...baseUpdateExtensionRequest } as UpdateExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + message.active = + object.active !== undefined && object.active !== null + ? 
Boolean(object.active) + : false; + return message; + }, + + toJSON(message: UpdateExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + message.active !== undefined && (obj.active = message.active); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateExtensionRequest { + const message = { ...baseUpdateExtensionRequest } as UpdateExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + message.active = object.active ?? false; + return message; + }, +}; + +messageTypeRegistry.set(UpdateExtensionRequest.$type, UpdateExtensionRequest); + +const baseUpdateExtensionMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata", + clusterId: "", + extensionId: "", +}; + +export const UpdateExtensionMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.UpdateExtensionMetadata" as const, + + encode( + message: UpdateExtensionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateExtensionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateExtensionMetadata, + } as UpdateExtensionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateExtensionMetadata { + const message = { + ...baseUpdateExtensionMetadata, + } as UpdateExtensionMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: UpdateExtensionMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateExtensionMetadata { + const message = { + ...baseUpdateExtensionMetadata, + } as UpdateExtensionMetadata; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(UpdateExtensionMetadata.$type, UpdateExtensionMetadata); + +const baseCreateExtensionRequest: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest", + clusterId: "", + name: "", + uri: "", + disabled: false, +}; + +export const CreateExtensionRequest = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionRequest" as const, + + encode( + message: CreateExtensionRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.name !== "") { + writer.uint32(18).string(message.name); + } + if (message.uri !== "") { + writer.uint32(26).string(message.uri); + } + if (message.disabled === true) { + writer.uint32(32).bool(message.disabled); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateExtensionRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseCreateExtensionRequest } as CreateExtensionRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.uri = reader.string(); + break; + case 4: + message.disabled = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateExtensionRequest { + const message = { ...baseCreateExtensionRequest } as CreateExtensionRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.uri = + object.uri !== undefined && object.uri !== null ? 
String(object.uri) : ""; + message.disabled = + object.disabled !== undefined && object.disabled !== null + ? Boolean(object.disabled) + : false; + return message; + }, + + toJSON(message: CreateExtensionRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.name !== undefined && (obj.name = message.name); + message.uri !== undefined && (obj.uri = message.uri); + message.disabled !== undefined && (obj.disabled = message.disabled); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateExtensionRequest { + const message = { ...baseCreateExtensionRequest } as CreateExtensionRequest; + message.clusterId = object.clusterId ?? ""; + message.name = object.name ?? ""; + message.uri = object.uri ?? ""; + message.disabled = object.disabled ?? false; + return message; + }, +}; + +messageTypeRegistry.set(CreateExtensionRequest.$type, CreateExtensionRequest); + +const baseCreateExtensionMetadata: object = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata", + clusterId: "", + extensionId: "", +}; + +export const CreateExtensionMetadata = { + $type: "yandex.cloud.mdb.elasticsearch.v1.CreateExtensionMetadata" as const, + + encode( + message: CreateExtensionMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.extensionId !== "") { + writer.uint32(18).string(message.extensionId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): CreateExtensionMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseCreateExtensionMetadata, + } as CreateExtensionMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.extensionId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): CreateExtensionMetadata { + const message = { + ...baseCreateExtensionMetadata, + } as CreateExtensionMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.extensionId = + object.extensionId !== undefined && object.extensionId !== null + ? String(object.extensionId) + : ""; + return message; + }, + + toJSON(message: CreateExtensionMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.extensionId !== undefined && + (obj.extensionId = message.extensionId); + return obj; + }, + + fromPartial, I>>( + object: I + ): CreateExtensionMetadata { + const message = { + ...baseCreateExtensionMetadata, + } as CreateExtensionMetadata; + message.clusterId = object.clusterId ?? ""; + message.extensionId = object.extensionId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CreateExtensionMetadata.$type, CreateExtensionMetadata); + +export const ExtensionServiceService = { + /** Returns the specified extension of Elasticsearch cluster. 
*/ + get: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetExtensionRequest) => + Buffer.from(GetExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetExtensionRequest.decode(value), + responseSerialize: (value: Extension) => + Buffer.from(Extension.encode(value).finish()), + responseDeserialize: (value: Buffer) => Extension.decode(value), + }, + /** Returns the list of available extensions for the specified Elasticsearch cluster. */ + list: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListExtensionsRequest) => + Buffer.from(ListExtensionsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListExtensionsRequest.decode(value), + responseSerialize: (value: ListExtensionsResponse) => + Buffer.from(ListExtensionsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListExtensionsResponse.decode(value), + }, + /** Creates new extension version. */ + create: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Create", + requestStream: false, + responseStream: false, + requestSerialize: (value: CreateExtensionRequest) => + Buffer.from(CreateExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => CreateExtensionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Updates the specified extension. 
*/ + update: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Update", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateExtensionRequest) => + Buffer.from(UpdateExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => UpdateExtensionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, + /** Deletes the specified extension. */ + delete: { + path: "/yandex.cloud.mdb.elasticsearch.v1.ExtensionService/Delete", + requestStream: false, + responseStream: false, + requestSerialize: (value: DeleteExtensionRequest) => + Buffer.from(DeleteExtensionRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => DeleteExtensionRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, +} as const; + +export interface ExtensionServiceServer extends UntypedServiceImplementation { + /** Returns the specified extension of Elasticsearch cluster. */ + get: handleUnaryCall; + /** Returns the list of available extensions for the specified Elasticsearch cluster. */ + list: handleUnaryCall; + /** Creates new extension version. */ + create: handleUnaryCall; + /** Updates the specified extension. */ + update: handleUnaryCall; + /** Deletes the specified extension. */ + delete: handleUnaryCall; +} + +export interface ExtensionServiceClient extends Client { + /** Returns the specified extension of Elasticsearch cluster. 
*/ + get( + request: GetExtensionRequest, + callback: (error: ServiceError | null, response: Extension) => void + ): ClientUnaryCall; + get( + request: GetExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Extension) => void + ): ClientUnaryCall; + get( + request: GetExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Extension) => void + ): ClientUnaryCall; + /** Returns the list of available extensions for the specified Elasticsearch cluster. */ + list( + request: ListExtensionsRequest, + callback: ( + error: ServiceError | null, + response: ListExtensionsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListExtensionsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListExtensionsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListExtensionsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListExtensionsResponse + ) => void + ): ClientUnaryCall; + /** Creates new extension version. */ + create( + request: CreateExtensionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + create( + request: CreateExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Updates the specified extension. 
*/ + update( + request: UpdateExtensionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + update( + request: UpdateExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + /** Deletes the specified extension. */ + delete( + request: DeleteExtensionRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteExtensionRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + delete( + request: DeleteExtensionRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; +} + +export const ExtensionServiceClient = makeGenericClientConstructor( + ExtensionServiceService, + "yandex.cloud.mdb.elasticsearch.v1.ExtensionService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ExtensionServiceClient; + service: typeof ExtensionServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts new file mode 100644 index 00000000..e6169ecf --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup.ts @@ -0,0 +1,232 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface Backup { + $type: "yandex.cloud.mdb.greenplum.v1.Backup"; + /** Required. ID of the backup. */ + id: string; + /** ID of the folder that the backup belongs to. */ + folderId: string; + /** + * Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format + * (i.e. when the backup operation was completed). + */ + createdAt?: Date; + /** ID of the PostgreSQL cluster that the backup was created for. */ + sourceClusterId: string; + /** Time when the backup operation was started. 
*/ + startedAt?: Date; + /** Size of backup in bytes */ + size: number; +} + +const baseBackup: object = { + $type: "yandex.cloud.mdb.greenplum.v1.Backup", + id: "", + folderId: "", + sourceClusterId: "", + size: 0, +}; + +export const Backup = { + $type: "yandex.cloud.mdb.greenplum.v1.Backup" as const, + + encode( + message: Backup, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.createdAt !== undefined) { + Timestamp.encode( + toTimestamp(message.createdAt), + writer.uint32(26).fork() + ).ldelim(); + } + if (message.sourceClusterId !== "") { + writer.uint32(34).string(message.sourceClusterId); + } + if (message.startedAt !== undefined) { + Timestamp.encode( + toTimestamp(message.startedAt), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.size !== 0) { + writer.uint32(48).int64(message.size); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Backup { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseBackup } as Backup; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.createdAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 4: + message.sourceClusterId = reader.string(); + break; + case 5: + message.startedAt = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.size = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Backup { + const message = { ...baseBackup } as Backup; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? fromJsonTimestamp(object.createdAt) + : undefined; + message.sourceClusterId = + object.sourceClusterId !== undefined && object.sourceClusterId !== null + ? String(object.sourceClusterId) + : ""; + message.startedAt = + object.startedAt !== undefined && object.startedAt !== null + ? fromJsonTimestamp(object.startedAt) + : undefined; + message.size = + object.size !== undefined && object.size !== null + ? 
Number(object.size) + : 0; + return message; + }, + + toJSON(message: Backup): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.createdAt !== undefined && + (obj.createdAt = message.createdAt.toISOString()); + message.sourceClusterId !== undefined && + (obj.sourceClusterId = message.sourceClusterId); + message.startedAt !== undefined && + (obj.startedAt = message.startedAt.toISOString()); + message.size !== undefined && (obj.size = Math.round(message.size)); + return obj; + }, + + fromPartial, I>>(object: I): Backup { + const message = { ...baseBackup } as Backup; + message.id = object.id ?? ""; + message.folderId = object.folderId ?? ""; + message.createdAt = object.createdAt ?? undefined; + message.sourceClusterId = object.sourceClusterId ?? ""; + message.startedAt = object.startedAt ?? undefined; + message.size = object.size ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Backup.$type, Backup); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts new file mode 100644 index 00000000..769482a2 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/backup_service.ts @@ -0,0 +1,429 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { Backup } from "../../../../../yandex/cloud/mdb/greenplum/v1/backup"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface GetBackupRequest { + $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest"; + /** Required. ID of the backup to return. 
*/ + backupId: string; +} + +export interface ListBackupsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest"; + /** Required. ID of the folder to list backups in. */ + folderId: string; + /** + * The maximum number of results per page that should be returned. If the number of available + * results is larger than `page_size`, the service returns a `next_page_token` that can be used + * to get the next page of results in subsequent ListBackups requests. + * Acceptable values are 0 to 1000, inclusive. Default value: 100. + */ + pageSize: number; + /** + * Page token. Set `page_token` to the `next_page_token` returned by a previous ListBackups + * request to get the next page of results. + */ + pageToken: string; +} + +export interface ListBackupsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsResponse"; + /** Requested list of backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for ListBackups requests, + * if the number of results is larger than `page_size` specified in the request. + * To get the next page, specify the value of `next_page_token` as a value for + * the `page_token` parameter in the next ListBackups request. Subsequent ListBackups + * requests will have their own `next_page_token` to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetBackupRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest", + backupId: "", +}; + +export const GetBackupRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.GetBackupRequest" as const, + + encode( + message: GetBackupRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GetBackupRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGetBackupRequest } as GetBackupRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + return message; + }, + + toJSON(message: GetBackupRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetBackupRequest { + const message = { ...baseGetBackupRequest } as GetBackupRequest; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GetBackupRequest.$type, GetBackupRequest); + +const baseListBackupsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest", + folderId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListBackupsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsRequest" as const, + + encode( + message: ListBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.folderId !== "") { + writer.uint32(10).string(message.folderId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.folderId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsRequest): unknown { + const obj: any = {}; + message.folderId !== undefined && (obj.folderId = message.folderId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsRequest { + const message = { ...baseListBackupsRequest } as ListBackupsRequest; + message.folderId = object.folderId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsRequest.$type, ListBackupsRequest); + +const baseListBackupsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsResponse", + nextPageToken: "", +}; + +export const ListBackupsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListBackupsResponse" as const, + + encode( + message: ListBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ListBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? 
Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListBackupsResponse { + const message = { ...baseListBackupsResponse } as ListBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); + +export const BackupServiceService = { + /** Returns the specified backup of Greenplum® cluster. */ + get: { + path: "/yandex.cloud.mdb.greenplum.v1.BackupService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetBackupRequest) => + Buffer.from(GetBackupRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetBackupRequest.decode(value), + responseSerialize: (value: Backup) => + Buffer.from(Backup.encode(value).finish()), + responseDeserialize: (value: Buffer) => Backup.decode(value), + }, + /** Returns the list of available backups for the specified Greenplum® cluster. */ + list: { + path: "/yandex.cloud.mdb.greenplum.v1.BackupService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListBackupsRequest) => + Buffer.from(ListBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListBackupsRequest.decode(value), + responseSerialize: (value: ListBackupsResponse) => + Buffer.from(ListBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListBackupsResponse.decode(value), + }, +} as const; + +export interface BackupServiceServer extends UntypedServiceImplementation { + /** Returns the specified backup of Greenplum® cluster. */ + get: handleUnaryCall; + /** Returns the list of available backups for the specified Greenplum® cluster. 
*/ + list: handleUnaryCall; +} + +export interface BackupServiceClient extends Client { + /** Returns the specified backup of Greenplum® cluster. */ + get( + request: GetBackupRequest, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + get( + request: GetBackupRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Backup) => void + ): ClientUnaryCall; + /** Returns the list of available backups for the specified Greenplum® cluster. */ + list( + request: ListBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListBackupsResponse + ) => void + ): ClientUnaryCall; +} + +export const BackupServiceClient = makeGenericClientConstructor( + BackupServiceService, + "yandex.cloud.mdb.greenplum.v1.BackupService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): BackupServiceClient; + service: typeof BackupServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | 
undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts index 1bda2522..664dca58 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster.ts @@ -5,6 +5,9 @@ import _m0 from "protobufjs/minimal"; import { MasterSubclusterConfig, SegmentSubclusterConfig, + ConnectionPoolerConfigSet, + Greenplumconfigset617, + Greenplumconfigset619, } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; import { MaintenanceWindow, @@ -15,44 +18,44 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; -/** A Greenplum Cluster resource. For more information, see the */ +/** A Greenplum® cluster resource. */ export interface Cluster { $type: "yandex.cloud.mdb.greenplum.v1.Cluster"; /** - * ID of the Greenplum cluster. - * This ID is assigned by MDB at creation time. + * ID of the Greenplum® cluster. + * This ID is assigned by Yandex Cloud at the time of cluster creation. */ id: string; - /** ID of the folder that the Greenplum cluster belongs to. */ + /** ID of the folder that the Greenplum® cluster belongs to. 
*/ folderId: string; - /** Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Cluster creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ createdAt?: Date; /** - * Name of the Greenplum cluster. - * The name is unique within the folder. 1-63 characters long. + * Name of the Greenplum® cluster. + * The name is unique within the folder and is 1-63 characters long. */ name: string; - /** Greenplum cluster config */ + /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Description of the Greenplum cluster. 0-256 characters long. */ + /** Description of the Greenplum® cluster. 0-256 characters long. */ description: string; - /** Custom labels for the Greenplum cluster as `key:value` pairs. Maximum 64 per resource. */ + /** Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 labels per resource. */ labels: { [key: string]: string }; - /** Deployment environment of the Greenplum cluster. */ + /** Deployment environment of the Greenplum® cluster. */ environment: Cluster_Environment; - /** Description of monitoring systems relevant to the Greenplum cluster. */ + /** Description of monitoring systems relevant to the Greenplum® cluster. */ monitoring: Monitoring[]; - /** Configuration of the Greenplum master subcluster. */ + /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfig; - /** Configuration of the Greenplum segment subcluster. */ + /** Configuration of the Greenplum® segment subcluster. */ segmentConfig?: SegmentSubclusterConfig; - /** Number of hosts of the master subcluster */ + /** Number of hosts in the master subcluster. */ masterHostCount: number; - /** Number of hosts of the segment subcluster */ + /** Number of hosts in the segment subcluster. */ segmentHostCount: number; - /** Number of segments in the host */ + /** Number of segments per host. 
*/ segmentInHost: number; - /** ID of the network that the cluster belongs to. */ + /** ID of the cloud network that the cluster belongs to. */ networkId: string; /** Aggregated cluster health. */ health: Cluster_Health; @@ -60,19 +63,20 @@ export interface Cluster { status: Cluster_Status; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; - /** Maintenance operation planned at nearest maintenance_window. */ + /** Maintenance operation planned at nearest [maintenance_window]. */ plannedOperation?: MaintenanceOperation; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Owner user name */ + /** Owner user name. */ userName: string; - /** Deletion Protection inhibits deletion of the cluster */ + /** Whether or not cluster is protected from being deleted. */ deletionProtection: boolean; /** Host groups hosting VMs of the cluster. */ hostGroupIds: string[]; + /** Greenplum and Odyssey configuration; */ + clusterConfig?: ClusterConfigSet; } -/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -120,9 +124,9 @@ export function cluster_EnvironmentToJSON(object: Cluster_Environment): string { } export enum Cluster_Health { - /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + /** HEALTH_UNKNOWN - Health of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). */ + /** ALIVE - Cluster is working normally ([Host.health] for every host in the cluster is ALIVE). */ ALIVE = 1, /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ DEAD = 2, @@ -174,6 +178,7 @@ export function cluster_HealthToJSON(object: Cluster_Health): string { } } +/** Current state of the cluster. 
*/ export enum Cluster_Status { /** STATUS_UNKNOWN - Cluster state is unknown. */ STATUS_UNKNOWN = 0, @@ -181,13 +186,13 @@ export enum Cluster_Status { CREATING = 1, /** RUNNING - Cluster is running normally. */ RUNNING = 2, - /** ERROR - Cluster encountered a problem and cannot operate. */ + /** ERROR - Cluster has encountered a problem and cannot operate. */ ERROR = 3, /** UPDATING - Cluster is being updated. */ UPDATING = 4, /** STOPPING - Cluster is stopping. */ STOPPING = 5, - /** STOPPED - Cluster stopped. */ + /** STOPPED - Cluster has stopped. */ STOPPED = 6, /** STARTING - Cluster is starting. */ STARTING = 7, @@ -256,6 +261,14 @@ export interface Cluster_LabelsEntry { value: string; } +export interface ClusterConfigSet { + $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet"; + greenplumConfigSet617?: Greenplumconfigset617 | undefined; + greenplumConfigSet619?: Greenplumconfigset619 | undefined; + /** Odyssey pool settings */ + pool?: ConnectionPoolerConfigSet; +} + /** Monitoring system metadata. */ export interface Monitoring { $type: "yandex.cloud.mdb.greenplum.v1.Monitoring"; @@ -263,18 +276,54 @@ export interface Monitoring { name: string; /** Description of the monitoring system. */ description: string; - /** Link to the monitoring system charts for the Greenplum cluster. */ + /** Link to the monitoring system charts for the Greenplum® cluster. */ link: string; } +/** Greenplum® cluster configuration. */ export interface GreenplumConfig { $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig"; - /** Version of the Greenplum server software. */ + /** Version of the Greenplum® server software. */ version: string; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Access policy for external services. */ access?: Access; + /** + * ID of the availability zone the cluster belongs to. + * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. 
+ */ + zoneId: string; + /** + * ID of the subnet the cluster belongs to. This subnet should be a part + * of the cloud network the cluster belongs to (see [Cluster.network_id]). + */ + subnetId: string; + /** + * Whether or not the cluster has a public IP address. + * + * After the cluster has been created, this setting cannot be changed. + */ + assignPublicIp: boolean; +} + +/** Greenplum® cluster access options. */ +export interface Access { + $type: "yandex.cloud.mdb.greenplum.v1.Access"; + /** Allows data export from the cluster to Yandex DataLens. */ + dataLens: boolean; + /** Allows SQL queries to the cluster databases from the Yandex Cloud management console. */ + webSql: boolean; + /** Allow access for DataTransfer. */ + dataTransfer: boolean; +} + +export interface GreenplumRestoreConfig { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumRestoreConfig"; + /** Time to start the daily backup, in the UTC timezone. */ + backupWindowStart?: TimeOfDay; + /** Access policy for external services. */ + access?: Access; /** * ID of the availability zone where the host resides. * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. @@ -299,12 +348,12 @@ export interface GreenplumConfig { assignPublicIp: boolean; } -export interface Access { - $type: "yandex.cloud.mdb.greenplum.v1.Access"; - /** Allow to export data from the cluster to Yandex DataLens. */ - dataLens: boolean; - /** Allow SQL queries to the cluster databases from the Yandex.Cloud management console. */ - webSql: boolean; +export interface RestoreResources { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources"; + /** ID of the preset for computational resources available to a host (CPU, memory etc.). */ + resourcePresetId: string; + /** Volume of the storage available to a host. 
*/ + diskSize: number; } const baseCluster: object = { @@ -424,6 +473,12 @@ export const Cluster = { for (const v of message.hostGroupIds) { writer.uint32(186).string(v!); } + if (message.clusterConfig !== undefined) { + ClusterConfigSet.encode( + message.clusterConfig, + writer.uint32(194).fork() + ).ldelim(); + } return writer; }, @@ -524,6 +579,12 @@ export const Cluster = { case 23: message.hostGroupIds.push(reader.string()); break; + case 24: + message.clusterConfig = ClusterConfigSet.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -625,6 +686,10 @@ export const Cluster = { message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => String(e) ); + message.clusterConfig = + object.clusterConfig !== undefined && object.clusterConfig !== null + ? ClusterConfigSet.fromJSON(object.clusterConfig) + : undefined; return message; }, @@ -696,6 +761,10 @@ export const Cluster = { } else { obj.hostGroupIds = []; } + message.clusterConfig !== undefined && + (obj.clusterConfig = message.clusterConfig + ? ClusterConfigSet.toJSON(message.clusterConfig) + : undefined); return obj; }, @@ -748,6 +817,10 @@ export const Cluster = { message.userName = object.userName ?? ""; message.deletionProtection = object.deletionProtection ?? false; message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.clusterConfig = + object.clusterConfig !== undefined && object.clusterConfig !== null + ? 
ClusterConfigSet.fromPartial(object.clusterConfig) + : undefined; return message; }, }; @@ -827,6 +900,131 @@ export const Cluster_LabelsEntry = { messageTypeRegistry.set(Cluster_LabelsEntry.$type, Cluster_LabelsEntry); +const baseClusterConfigSet: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet", +}; + +export const ClusterConfigSet = { + $type: "yandex.cloud.mdb.greenplum.v1.ClusterConfigSet" as const, + + encode( + message: ClusterConfigSet, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.greenplumConfigSet617 !== undefined) { + Greenplumconfigset617.encode( + message.greenplumConfigSet617, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.greenplumConfigSet619 !== undefined) { + Greenplumconfigset619.encode( + message.greenplumConfigSet619, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.pool !== undefined) { + ConnectionPoolerConfigSet.encode( + message.pool, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ClusterConfigSet { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseClusterConfigSet } as ClusterConfigSet; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.greenplumConfigSet617 = Greenplumconfigset617.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.greenplumConfigSet619 = Greenplumconfigset619.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.pool = ConnectionPoolerConfigSet.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ClusterConfigSet { + const message = { ...baseClusterConfigSet } as ClusterConfigSet; + message.greenplumConfigSet617 = + object.greenplumConfigSet_6_17 !== undefined && + object.greenplumConfigSet_6_17 !== null + ? Greenplumconfigset617.fromJSON(object.greenplumConfigSet_6_17) + : undefined; + message.greenplumConfigSet619 = + object.greenplumConfigSet_6_19 !== undefined && + object.greenplumConfigSet_6_19 !== null + ? Greenplumconfigset619.fromJSON(object.greenplumConfigSet_6_19) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfigSet.fromJSON(object.pool) + : undefined; + return message; + }, + + toJSON(message: ClusterConfigSet): unknown { + const obj: any = {}; + message.greenplumConfigSet617 !== undefined && + (obj.greenplumConfigSet_6_17 = message.greenplumConfigSet617 + ? Greenplumconfigset617.toJSON(message.greenplumConfigSet617) + : undefined); + message.greenplumConfigSet619 !== undefined && + (obj.greenplumConfigSet_6_19 = message.greenplumConfigSet619 + ? Greenplumconfigset619.toJSON(message.greenplumConfigSet619) + : undefined); + message.pool !== undefined && + (obj.pool = message.pool + ? 
ConnectionPoolerConfigSet.toJSON(message.pool) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ClusterConfigSet { + const message = { ...baseClusterConfigSet } as ClusterConfigSet; + message.greenplumConfigSet617 = + object.greenplumConfigSet617 !== undefined && + object.greenplumConfigSet617 !== null + ? Greenplumconfigset617.fromPartial(object.greenplumConfigSet617) + : undefined; + message.greenplumConfigSet619 = + object.greenplumConfigSet619 !== undefined && + object.greenplumConfigSet619 !== null + ? Greenplumconfigset619.fromPartial(object.greenplumConfigSet619) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfigSet.fromPartial(object.pool) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ClusterConfigSet.$type, ClusterConfigSet); + const baseMonitoring: object = { $type: "yandex.cloud.mdb.greenplum.v1.Monitoring", name: "", @@ -1061,6 +1259,7 @@ const baseAccess: object = { $type: "yandex.cloud.mdb.greenplum.v1.Access", dataLens: false, webSql: false, + dataTransfer: false, }; export const Access = { @@ -1076,6 +1275,9 @@ export const Access = { if (message.webSql === true) { writer.uint32(16).bool(message.webSql); } + if (message.dataTransfer === true) { + writer.uint32(24).bool(message.dataTransfer); + } return writer; }, @@ -1092,6 +1294,9 @@ export const Access = { case 2: message.webSql = reader.bool(); break; + case 3: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1110,6 +1315,10 @@ export const Access = { object.webSql !== undefined && object.webSql !== null ? Boolean(object.webSql) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? 
Boolean(object.dataTransfer) + : false; return message; }, @@ -1117,6 +1326,8 @@ export const Access = { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); message.webSql !== undefined && (obj.webSql = message.webSql); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, @@ -1124,12 +1335,221 @@ export const Access = { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; message.webSql = object.webSql ?? false; + message.dataTransfer = object.dataTransfer ?? false; return message; }, }; messageTypeRegistry.set(Access.$type, Access); +const baseGreenplumRestoreConfig: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumRestoreConfig", + zoneId: "", + subnetId: "", + assignPublicIp: false, +}; + +export const GreenplumRestoreConfig = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumRestoreConfig" as const, + + encode( + message: GreenplumRestoreConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupWindowStart !== undefined) { + TimeOfDay.encode( + message.backupWindowStart, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(18).fork()).ldelim(); + } + if (message.zoneId !== "") { + writer.uint32(26).string(message.zoneId); + } + if (message.subnetId !== "") { + writer.uint32(34).string(message.subnetId); + } + if (message.assignPublicIp === true) { + writer.uint32(40).bool(message.assignPublicIp); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GreenplumRestoreConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGreenplumRestoreConfig } as GreenplumRestoreConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); + break; + case 2: + message.access = Access.decode(reader, reader.uint32()); + break; + case 3: + message.zoneId = reader.string(); + break; + case 4: + message.subnetId = reader.string(); + break; + case 5: + message.assignPublicIp = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GreenplumRestoreConfig { + const message = { ...baseGreenplumRestoreConfig } as GreenplumRestoreConfig; + message.backupWindowStart = + object.backupWindowStart !== undefined && + object.backupWindowStart !== null + ? TimeOfDay.fromJSON(object.backupWindowStart) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; + message.zoneId = + object.zoneId !== undefined && object.zoneId !== null + ? String(object.zoneId) + : ""; + message.subnetId = + object.subnetId !== undefined && object.subnetId !== null + ? String(object.subnetId) + : ""; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + return message; + }, + + toJSON(message: GreenplumRestoreConfig): unknown { + const obj: any = {}; + message.backupWindowStart !== undefined && + (obj.backupWindowStart = message.backupWindowStart + ? TimeOfDay.toJSON(message.backupWindowStart) + : undefined); + message.access !== undefined && + (obj.access = message.access ? 
Access.toJSON(message.access) : undefined); + message.zoneId !== undefined && (obj.zoneId = message.zoneId); + message.subnetId !== undefined && (obj.subnetId = message.subnetId); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + return obj; + }, + + fromPartial, I>>( + object: I + ): GreenplumRestoreConfig { + const message = { ...baseGreenplumRestoreConfig } as GreenplumRestoreConfig; + message.backupWindowStart = + object.backupWindowStart !== undefined && + object.backupWindowStart !== null + ? TimeOfDay.fromPartial(object.backupWindowStart) + : undefined; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromPartial(object.access) + : undefined; + message.zoneId = object.zoneId ?? ""; + message.subnetId = object.subnetId ?? ""; + message.assignPublicIp = object.assignPublicIp ?? false; + return message; + }, +}; + +messageTypeRegistry.set(GreenplumRestoreConfig.$type, GreenplumRestoreConfig); + +const baseRestoreResources: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources", + resourcePresetId: "", + diskSize: 0, +}; + +export const RestoreResources = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreResources" as const, + + encode( + message: RestoreResources, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourcePresetId !== "") { + writer.uint32(10).string(message.resourcePresetId); + } + if (message.diskSize !== 0) { + writer.uint32(16).int64(message.diskSize); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): RestoreResources { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseRestoreResources } as RestoreResources; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresetId = reader.string(); + break; + case 2: + message.diskSize = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreResources { + const message = { ...baseRestoreResources } as RestoreResources; + message.resourcePresetId = + object.resourcePresetId !== undefined && object.resourcePresetId !== null + ? String(object.resourcePresetId) + : ""; + message.diskSize = + object.diskSize !== undefined && object.diskSize !== null + ? Number(object.diskSize) + : 0; + return message; + }, + + toJSON(message: RestoreResources): unknown { + const obj: any = {}; + message.resourcePresetId !== undefined && + (obj.resourcePresetId = message.resourcePresetId); + message.diskSize !== undefined && + (obj.diskSize = Math.round(message.diskSize)); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreResources { + const message = { ...baseRestoreResources } as RestoreResources; + message.resourcePresetId = object.resourcePresetId ?? ""; + message.diskSize = object.diskSize ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(RestoreResources.$type, RestoreResources); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts index 0cd0d67d..23cb86f1 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/cluster_service.ts @@ -17,26 +17,30 @@ import _m0 from "protobufjs/minimal"; import { Cluster_Environment, GreenplumConfig, + GreenplumRestoreConfig, Cluster, cluster_EnvironmentFromJSON, cluster_EnvironmentToJSON, } from "../../../../../yandex/cloud/mdb/greenplum/v1/cluster"; import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/greenplum/v1/maintenance"; -import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { + ConnectionPoolerConfig, Resources, - GreenplumMasterConfig, - GreenplumSegmentConfig, + Greenplumconfig617, + Greenplumconfig619, } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; +import { FieldMask } from "../../../../../google/protobuf/field_mask"; +import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { Host } from "../../../../../yandex/cloud/mdb/greenplum/v1/host"; +import { Backup } from "../../../../../yandex/cloud/mdb/greenplum/v1/backup"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; export interface GetClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.GetClusterRequest"; /** - * ID of the Greenplum Cluster resource to return. + * ID of the Greenplum® Cluster resource to return. * To get the cluster ID, use a [ClusterService.List] request. 
*/ clusterId: string; @@ -45,7 +49,7 @@ export interface GetClusterRequest { export interface ListClustersRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClustersRequest"; /** - * ID of the folder to list Greenplum clusters in. + * ID of the folder to list Greenplum® clusters in. * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; @@ -85,45 +89,46 @@ export interface ListClustersResponse { export interface CreateClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterRequest"; - /** ID of the folder to create the Greenplum cluster in. */ + /** ID of the folder to create the Greenplum® cluster in. */ folderId: string; - /** Name of the Greenplum cluster. The name must be unique within the folder. */ + /** Name of the Greenplum® cluster. The name must be unique within the folder. Maximum 63 characters. */ name: string; - /** Description of the Greenplum cluster. */ + /** Description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. + * For example, "project":"mvp" or "source":"dictionary". */ labels: { [key: string]: string }; - /** Deployment environment of the Greenplum cluster. */ + /** Deployment environment of the Greenplum® cluster. */ environment: Cluster_Environment; - /** Greenplum cluster config */ + /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Configuration of the Greenplum master subcluster. */ + /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfigSpec; - /** Configuration of the Greenplum segment subcluster. */ + /** Configuration of the Greenplum® segment subcluster. 
*/ segmentConfig?: SegmentSubclusterConfigSpec; - /** Number of hosts of the master subcluster */ + /** Number of hosts in the master subcluster. */ masterHostCount: number; - /** Number of segments in the host */ + /** Number of segments per host. */ segmentInHost: number; - /** Number of hosts of the segment subcluster */ + /** Number of hosts in the segment subcluster. */ segmentHostCount: number; - /** Owner user name */ + /** Owner user name. */ userName: string; - /** Owner user password */ + /** Owner user password. Must be 8-128 characters long */ userPassword: string; /** ID of the network to create the cluster in. */ networkId: string; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Whether or not cluster is protected from being deleted. */ deletionProtection: boolean; - /** Host groups to place VMs of cluster on. */ + /** Host groups to place VMs of the cluster in. */ hostGroupIds: string[]; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; + configSpec?: ConfigSpec; } export interface CreateClusterRequest_LabelsEntry { @@ -132,26 +137,35 @@ export interface CreateClusterRequest_LabelsEntry { value: string; } +/** Configuration of greenplum and odyssey */ +export interface ConfigSpec { + $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec"; + greenplumConfig617?: Greenplumconfig617 | undefined; + greenplumConfig619?: Greenplumconfig619 | undefined; + /** Odyssey pool settings */ + pool?: ConnectionPoolerConfig; +} + export interface CreateClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterMetadata"; - /** ID of the Greenplum cluster that is being created. */ + /** ID of the Greenplum® cluster that is being created. */ clusterId: string; } export interface UpdateClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.UpdateClusterRequest"; /** - * ID of the Greenplum Cluster resource to update. 
- * To get the Greenplum cluster ID, use a [ClusterService.List] request. + * ID of the Greenplum® Cluster resource to update. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the Greenplum Cluster resource should be updated. */ + /** Field mask that specifies which fields of the Greenplum® Cluster resource should be updated. */ updateMask?: FieldMask; - /** New description of the Greenplum cluster. */ + /** New description of the Greenplum® cluster. */ description: string; /** - * Custom labels for the Greenplum cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. + * For example, "project":"mvp" or "source":"dictionary". * * The new set of labels will completely replace the old ones. To add a label, request the current * set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. @@ -159,17 +173,17 @@ export interface UpdateClusterRequest { labels: { [key: string]: string }; /** New name for the cluster. */ name: string; - /** Greenplum cluster config */ + /** Greenplum® cluster configuration. */ config?: GreenplumConfig; - /** Configuration of the Greenplum master subcluster. */ + /** Configuration of the Greenplum® master subcluster. */ masterConfig?: MasterSubclusterConfigSpec; - /** Configuration of the Greenplum segment subcluster. */ + /** Configuration of the Greenplum® segment subcluster. */ segmentConfig?: SegmentSubclusterConfigSpec; /** Window of maintenance operations. */ maintenanceWindow?: MaintenanceWindow; - /** User security groups */ + /** User security groups. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** Whether or not cluster is protected from being deleted. 
*/ deletionProtection: boolean; } @@ -181,52 +195,58 @@ export interface UpdateClusterRequest_LabelsEntry { export interface UpdateClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.UpdateClusterMetadata"; - /** ID of the Greenplum Cluster resource that is being updated. */ + /** ID of the Greenplum® Cluster resource that is being updated. */ clusterId: string; } export interface DeleteClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.DeleteClusterRequest"; /** - * ID of the Greenplum cluster to delete. - * To get the Greenplum cluster ID, use a [ClusterService.List] request. + * ID of the Greenplum® cluster to delete. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. */ clusterId: string; } export interface DeleteClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.DeleteClusterMetadata"; - /** ID of the Greenplum cluster that is being deleted. */ + /** ID of the Greenplum® cluster that is being deleted. */ clusterId: string; } export interface StartClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.StartClusterRequest"; - /** ID of the Greenplum cluster to start. */ + /** + * ID of the Greenplum® cluster to start. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. + */ clusterId: string; } export interface StartClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.StartClusterMetadata"; - /** ID of the Greenplum cluster being started. */ + /** ID of the Greenplum® cluster being started. */ clusterId: string; } export interface StopClusterRequest { $type: "yandex.cloud.mdb.greenplum.v1.StopClusterRequest"; - /** ID of the Greenplum cluster to stop. */ + /** + * ID of the Greenplum® cluster to stop. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. + */ clusterId: string; } export interface StopClusterMetadata { $type: "yandex.cloud.mdb.greenplum.v1.StopClusterMetadata"; - /** ID of the Greenplum cluster being stopped. */ + /** ID of the Greenplum® cluster being stopped. 
*/ clusterId: string; } export interface ListClusterOperationsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterOperationsRequest"; - /** ID of the Greenplum Cluster resource to list operations for. */ + /** ID of the Greenplum® Cluster resource to list operations for. */ clusterId: string; /** * The maximum number of results per page to return. If the number of available @@ -235,7 +255,7 @@ export interface ListClusterOperationsRequest { */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] + * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] * returned by a previous list request. */ pageToken: string; @@ -243,7 +263,7 @@ export interface ListClusterOperationsRequest { export interface ListClusterOperationsResponse { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterOperationsResponse"; - /** List of Operation resources for the specified Greenplum cluster. */ + /** List of Operation resources for the specified Greenplum® cluster. */ operations: Operation[]; /** * This token allows you to get the next page of results for list requests. If the number of results @@ -257,8 +277,8 @@ export interface ListClusterOperationsResponse { export interface ListClusterHostsRequest { $type: "yandex.cloud.mdb.greenplum.v1.ListClusterHostsRequest"; /** - * ID of the Greenplum cluster. - * To get the Greenplum cluster ID use a [ClusterService.List] request. + * ID of the Greenplum® cluster. + * To get the Greenplum® cluster ID use a [ClusterService.List] request. */ clusterId: string; /** @@ -268,7 +288,7 @@ export interface ListClusterHostsRequest { */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] + * Page token. 
To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] * returned by a previous list request. */ pageToken: string; @@ -287,22 +307,224 @@ export interface ListClusterHostsResponse { nextPageToken: string; } -/** Configuration of master subcluster */ +/** Configuration of the master subcluster. */ export interface MasterSubclusterConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfigSpec"; - /** Resources allocated to Greenplum master subcluster hosts. */ + /** Resources allocated to Greenplum® master subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum master server. */ - config?: GreenplumMasterConfig; } -/** Configuration of segmet subcluster */ +/** Configuration of the segment subcluster. */ export interface SegmentSubclusterConfigSpec { $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfigSpec"; - /** Resources allocated to Greenplum segment subcluster hosts. */ + /** Resources allocated to Greenplum® segment subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum segment server. */ - config?: GreenplumSegmentConfig; +} + +export interface ListClusterLogsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsResponse"; + /** Requested log records. */ + logs: LogRecord[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value + * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * This value is interchangeable with the [StreamLogRecord.next_record_token] from StreamLogs method. 
+ */ + nextPageToken: string; +} + +export interface LogRecord { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord"; + /** Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + timestamp?: Date; + /** Contents of the log record. */ + message: { [key: string]: string }; +} + +export interface LogRecord_MessageEntry { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry"; + key: string; + value: string; +} + +export interface ListClusterLogsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest"; + /** + * ID of the Greenplum® cluster to request logs for. + * To get the Greenplum® cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** + * Columns from logs table to request. + * If no columns are specified, entire log records are returned. + */ + columnFilter: string[]; + /** Type of the service to request logs about. */ + serviceType: ListClusterLogsRequest_ServiceType; + /** Start timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + fromTime?: Date; + /** End timestamp for the logs request, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + toTime?: Date; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] + * returned by a previous list request. + */ + pageToken: string; + /** Always return `next_page_token`, even if the current page is empty. */ + alwaysNextPageToken: boolean; + /** + * A filter expression that filters resources listed in the response. + * The expression must specify: + * 1. The field name. 
Currently filtering can be applied to the [LogRecord.logs.message.hostname], + * [LogRecord.logs.message.error_severity] (for `GREENPLUM` service) and [LogRecord.logs.message.level] (for `GREENPLUM_POOLER` service) fields. + * 2. A conditional operator. Can be either `=` or `!=` for single values, `IN` or `NOT IN` for lists of values. + * 3. The value. Must be 1-63 characters long and match the regular expression `^[a-z0-9.-]{1,61}$`. + * Examples of a filter: + * * `message.hostname='node1.db.cloud.yandex.net'` + * * `message.error_severity IN ("ERROR", "FATAL", "PANIC") AND message.hostname = "node1.db.cloud.yandex.net"` + */ + filter: string; +} + +/** Type of the service to request logs about. */ +export enum ListClusterLogsRequest_ServiceType { + /** SERVICE_TYPE_UNSPECIFIED - Type is not specified. */ + SERVICE_TYPE_UNSPECIFIED = 0, + /** GREENPLUM - Greenplum® activity logs. */ + GREENPLUM = 1, + /** GREENPLUM_POOLER - Greenplum® pooler logs. */ + GREENPLUM_POOLER = 2, + UNRECOGNIZED = -1, +} + +export function listClusterLogsRequest_ServiceTypeFromJSON( + object: any +): ListClusterLogsRequest_ServiceType { + switch (object) { + case 0: + case "SERVICE_TYPE_UNSPECIFIED": + return ListClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED; + case 1: + case "GREENPLUM": + return ListClusterLogsRequest_ServiceType.GREENPLUM; + case 2: + case "GREENPLUM_POOLER": + return ListClusterLogsRequest_ServiceType.GREENPLUM_POOLER; + case -1: + case "UNRECOGNIZED": + default: + return ListClusterLogsRequest_ServiceType.UNRECOGNIZED; + } +} + +export function listClusterLogsRequest_ServiceTypeToJSON( + object: ListClusterLogsRequest_ServiceType +): string { + switch (object) { + case ListClusterLogsRequest_ServiceType.SERVICE_TYPE_UNSPECIFIED: + return "SERVICE_TYPE_UNSPECIFIED"; + case ListClusterLogsRequest_ServiceType.GREENPLUM: + return "GREENPLUM"; + case ListClusterLogsRequest_ServiceType.GREENPLUM_POOLER: + return "GREENPLUM_POOLER"; + default: + return 
"UNKNOWN"; + } +} + +export interface ListClusterBackupsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest"; + /** + * ID of the Greenplum® cluster. + * To get the Greenplum® cluster ID use a [ClusterService.List] request. + */ + clusterId: string; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] + * returned by a previous list request. + */ + pageToken: string; +} + +export interface ListClusterBackupsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse"; + /** List of Greenplum® backups. */ + backups: Backup[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value + * for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. + * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +export interface RestoreClusterRequest { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest"; + /** + * ID of the backup to create a cluster from. + * To get the backup ID, use a [ClusterService.ListBackups] request. + */ + backupId: string; + /** ID of the folder to create the Greenplum® cluster in. */ + folderId: string; + /** Name of the Greenplum® cluster. The name must be unique within the folder. */ + name: string; + /** Description of the Greenplum® cluster. */ + description: string; + /** + * Custom labels for the Greenplum® cluster as `key:value` pairs. Maximum 64 per resource. 
+ * For example, "project": "mvp" or "source": "dictionary". + */ + labels: { [key: string]: string }; + /** Deployment environment of the Greenplum® cluster. */ + environment: Cluster_Environment; + /** Greenplum® cluster config */ + config?: GreenplumRestoreConfig; + /** Resources of the Greenplum® master subcluster. */ + masterResources?: Resources; + /** Resources of the Greenplum® segment subcluster. */ + segmentResources?: Resources; + /** ID of the network to create the cluster in. */ + networkId: string; + /** User security groups */ + securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** Host groups to place VMs of cluster on. */ + hostGroupIds: string[]; + /** ID of placement group */ + placementGroupId: string; + /** Window of maintenance operations. */ + maintenanceWindow?: MaintenanceWindow; +} + +export interface RestoreClusterRequest_LabelsEntry { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry"; + key: string; + value: string; +} + +export interface RestoreClusterMetadata { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterMetadata"; + /** ID of the new Greenplum® cluster that is being created from a backup. */ + clusterId: string; + /** ID of the backup that is being used for creating a cluster. */ + backupId: string; } const baseGetClusterRequest: object = { @@ -649,6 +871,9 @@ export const CreateClusterRequest = { writer.uint32(154).fork() ).ldelim(); } + if (message.configSpec !== undefined) { + ConfigSpec.encode(message.configSpec, writer.uint32(162).fork()).ldelim(); + } return writer; }, @@ -734,6 +959,9 @@ export const CreateClusterRequest = { reader.uint32() ); break; + case 20: + message.configSpec = ConfigSpec.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -818,6 +1046,10 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? 
MaintenanceWindow.fromJSON(object.maintenanceWindow) : undefined; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromJSON(object.configSpec) + : undefined; return message; }, @@ -873,6 +1105,10 @@ export const CreateClusterRequest = { (obj.maintenanceWindow = message.maintenanceWindow ? MaintenanceWindow.toJSON(message.maintenanceWindow) : undefined); + message.configSpec !== undefined && + (obj.configSpec = message.configSpec + ? ConfigSpec.toJSON(message.configSpec) + : undefined); return obj; }, @@ -918,6 +1154,10 @@ export const CreateClusterRequest = { object.maintenanceWindow !== null ? MaintenanceWindow.fromPartial(object.maintenanceWindow) : undefined; + message.configSpec = + object.configSpec !== undefined && object.configSpec !== null + ? ConfigSpec.fromPartial(object.configSpec) + : undefined; return message; }, }; @@ -1010,6 +1250,128 @@ messageTypeRegistry.set( CreateClusterRequest_LabelsEntry ); +const baseConfigSpec: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec", +}; + +export const ConfigSpec = { + $type: "yandex.cloud.mdb.greenplum.v1.ConfigSpec" as const, + + encode( + message: ConfigSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.greenplumConfig617 !== undefined) { + Greenplumconfig617.encode( + message.greenplumConfig617, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.greenplumConfig619 !== undefined) { + Greenplumconfig619.encode( + message.greenplumConfig619, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.pool !== undefined) { + ConnectionPoolerConfig.encode( + message.pool, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ConfigSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConfigSpec } as ConfigSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.greenplumConfig617 = Greenplumconfig617.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.greenplumConfig619 = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.pool = ConnectionPoolerConfig.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ConfigSpec { + const message = { ...baseConfigSpec } as ConfigSpec; + message.greenplumConfig617 = + object.greenplumConfig_6_17 !== undefined && + object.greenplumConfig_6_17 !== null + ? Greenplumconfig617.fromJSON(object.greenplumConfig_6_17) + : undefined; + message.greenplumConfig619 = + object.greenplumConfig_6_19 !== undefined && + object.greenplumConfig_6_19 !== null + ? Greenplumconfig619.fromJSON(object.greenplumConfig_6_19) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfig.fromJSON(object.pool) + : undefined; + return message; + }, + + toJSON(message: ConfigSpec): unknown { + const obj: any = {}; + message.greenplumConfig617 !== undefined && + (obj.greenplumConfig_6_17 = message.greenplumConfig617 + ? Greenplumconfig617.toJSON(message.greenplumConfig617) + : undefined); + message.greenplumConfig619 !== undefined && + (obj.greenplumConfig_6_19 = message.greenplumConfig619 + ? Greenplumconfig619.toJSON(message.greenplumConfig619) + : undefined); + message.pool !== undefined && + (obj.pool = message.pool + ? ConnectionPoolerConfig.toJSON(message.pool) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): ConfigSpec { + const message = { ...baseConfigSpec } as ConfigSpec; + message.greenplumConfig617 = + object.greenplumConfig617 !== undefined && + object.greenplumConfig617 !== null + ? 
Greenplumconfig617.fromPartial(object.greenplumConfig617) + : undefined; + message.greenplumConfig619 = + object.greenplumConfig619 !== undefined && + object.greenplumConfig619 !== null + ? Greenplumconfig619.fromPartial(object.greenplumConfig619) + : undefined; + message.pool = + object.pool !== undefined && object.pool !== null + ? ConnectionPoolerConfig.fromPartial(object.pool) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ConfigSpec.$type, ConfigSpec); + const baseCreateClusterMetadata: object = { $type: "yandex.cloud.mdb.greenplum.v1.CreateClusterMetadata", clusterId: "", @@ -2278,12 +2640,6 @@ export const MasterSubclusterConfigSpec = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumMasterConfig.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -2302,12 +2658,6 @@ export const MasterSubclusterConfigSpec = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumMasterConfig.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -2324,10 +2674,6 @@ export const MasterSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumMasterConfig.fromJSON(object.config) - : undefined; return message; }, @@ -2337,10 +2683,6 @@ export const MasterSubclusterConfigSpec = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumMasterConfig.toJSON(message.config) - : undefined); return obj; }, @@ -2354,10 +2696,6 @@ export const MasterSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? 
Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumMasterConfig.fromPartial(object.config) - : undefined; return message; }, }; @@ -2381,12 +2719,6 @@ export const SegmentSubclusterConfigSpec = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumSegmentConfig.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -2405,12 +2737,6 @@ export const SegmentSubclusterConfigSpec = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumSegmentConfig.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -2427,10 +2753,6 @@ export const SegmentSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfig.fromJSON(object.config) - : undefined; return message; }, @@ -2440,10 +2762,6 @@ export const SegmentSubclusterConfigSpec = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumSegmentConfig.toJSON(message.config) - : undefined); return obj; }, @@ -2457,10 +2775,6 @@ export const SegmentSubclusterConfigSpec = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfig.fromPartial(object.config) - : undefined; return message; }, }; @@ -2470,43 +2784,1180 @@ messageTypeRegistry.set( SegmentSubclusterConfigSpec ); -/** A set of methods for managing Greenplum clusters. 
*/ -export const ClusterServiceService = { - /** - * Returns the specified Greenplum cluster. - * - * To get the list of available Greenplum clusters, make a [List] request. - */ - get: { - path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Get", - requestStream: false, - responseStream: false, - requestSerialize: (value: GetClusterRequest) => - Buffer.from(GetClusterRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), - responseSerialize: (value: Cluster) => - Buffer.from(Cluster.encode(value).finish()), - responseDeserialize: (value: Buffer) => Cluster.decode(value), - }, - /** - * Retrieves a list of Greenplum clusters that belong - * to the specified folder. - */ - list: { - path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/List", - requestStream: false, - responseStream: false, - requestSerialize: (value: ListClustersRequest) => - Buffer.from(ListClustersRequest.encode(value).finish()), - requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), - responseSerialize: (value: ListClustersResponse) => - Buffer.from(ListClustersResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), +const baseListClusterLogsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsResponse", + nextPageToken: "", +}; + +export const ListClusterLogsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsResponse" as const, + + encode( + message: ListClusterLogsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.logs) { + LogRecord.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; }, - /** Creates a Greenplum cluster in the specified folder. 
*/ - create: { - path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Create", - requestStream: false, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterLogsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.logs.push(LogRecord.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterLogsResponse { + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = (object.logs ?? []).map((e: any) => LogRecord.fromJSON(e)); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterLogsResponse): unknown { + const obj: any = {}; + if (message.logs) { + obj.logs = message.logs.map((e) => (e ? LogRecord.toJSON(e) : undefined)); + } else { + obj.logs = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterLogsResponse { + const message = { + ...baseListClusterLogsResponse, + } as ListClusterLogsResponse; + message.logs = object.logs?.map((e) => LogRecord.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterLogsResponse.$type, ListClusterLogsResponse); + +const baseLogRecord: object = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord", +}; + +export const LogRecord = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord" as const, + + encode( + message: LogRecord, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.timestamp !== undefined) { + Timestamp.encode( + toTimestamp(message.timestamp), + writer.uint32(10).fork() + ).ldelim(); + } + Object.entries(message.message).forEach(([key, value]) => { + LogRecord_MessageEntry.encode( + { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry", + key: key as any, + value, + }, + writer.uint32(18).fork() + ).ldelim(); + }); + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LogRecord { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseLogRecord } as LogRecord; + message.message = {}; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.timestamp = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 2: + const entry2 = LogRecord_MessageEntry.decode(reader, reader.uint32()); + if (entry2.value !== undefined) { + message.message[entry2.key] = entry2.value; + } + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogRecord { + const message = { ...baseLogRecord } as LogRecord; + message.timestamp = + object.timestamp !== undefined && object.timestamp !== null + ? fromJsonTimestamp(object.timestamp) + : undefined; + message.message = Object.entries(object.message ?? 
{}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + return message; + }, + + toJSON(message: LogRecord): unknown { + const obj: any = {}; + message.timestamp !== undefined && + (obj.timestamp = message.timestamp.toISOString()); + obj.message = {}; + if (message.message) { + Object.entries(message.message).forEach(([k, v]) => { + obj.message[k] = v; + }); + } + return obj; + }, + + fromPartial, I>>( + object: I + ): LogRecord { + const message = { ...baseLogRecord } as LogRecord; + message.timestamp = object.timestamp ?? undefined; + message.message = Object.entries(object.message ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + return message; + }, +}; + +messageTypeRegistry.set(LogRecord.$type, LogRecord); + +const baseLogRecord_MessageEntry: object = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry", + key: "", + value: "", +}; + +export const LogRecord_MessageEntry = { + $type: "yandex.cloud.mdb.greenplum.v1.LogRecord.MessageEntry" as const, + + encode( + message: LogRecord_MessageEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): LogRecord_MessageEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): LogRecord_MessageEntry { + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: LogRecord_MessageEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial, I>>( + object: I + ): LogRecord_MessageEntry { + const message = { ...baseLogRecord_MessageEntry } as LogRecord_MessageEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(LogRecord_MessageEntry.$type, LogRecord_MessageEntry); + +const baseListClusterLogsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest", + clusterId: "", + columnFilter: "", + serviceType: 0, + pageSize: 0, + pageToken: "", + alwaysNextPageToken: false, + filter: "", +}; + +export const ListClusterLogsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterLogsRequest" as const, + + encode( + message: ListClusterLogsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.columnFilter) { + writer.uint32(18).string(v!); + } + if (message.serviceType !== 0) { + writer.uint32(24).int32(message.serviceType); + } + if (message.fromTime !== undefined) { + Timestamp.encode( + toTimestamp(message.fromTime), + writer.uint32(34).fork() + ).ldelim(); + } + if (message.toTime !== undefined) { + Timestamp.encode( + toTimestamp(message.toTime), + writer.uint32(42).fork() + ).ldelim(); + } + if (message.pageSize !== 0) { + writer.uint32(48).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(58).string(message.pageToken); + } + if (message.alwaysNextPageToken === true) { + writer.uint32(64).bool(message.alwaysNextPageToken); + } + if (message.filter !== "") { + writer.uint32(74).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterLogsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.columnFilter = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.columnFilter.push(reader.string()); + break; + case 3: + message.serviceType = reader.int32() as any; + break; + case 4: + message.fromTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 5: + message.toTime = fromTimestamp( + Timestamp.decode(reader, reader.uint32()) + ); + break; + case 6: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 7: + message.pageToken = reader.string(); + break; + case 8: + message.alwaysNextPageToken = reader.bool(); + break; + case 9: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterLogsRequest { + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.columnFilter = (object.columnFilter ?? []).map((e: any) => + String(e) + ); + message.serviceType = + object.serviceType !== undefined && object.serviceType !== null + ? listClusterLogsRequest_ServiceTypeFromJSON(object.serviceType) + : 0; + message.fromTime = + object.fromTime !== undefined && object.fromTime !== null + ? fromJsonTimestamp(object.fromTime) + : undefined; + message.toTime = + object.toTime !== undefined && object.toTime !== null + ? fromJsonTimestamp(object.toTime) + : undefined; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + message.alwaysNextPageToken = + object.alwaysNextPageToken !== undefined && + object.alwaysNextPageToken !== null + ? Boolean(object.alwaysNextPageToken) + : false; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: ListClusterLogsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.columnFilter) { + obj.columnFilter = message.columnFilter.map((e) => e); + } else { + obj.columnFilter = []; + } + message.serviceType !== undefined && + (obj.serviceType = listClusterLogsRequest_ServiceTypeToJSON( + message.serviceType + )); + message.fromTime !== undefined && + (obj.fromTime = message.fromTime.toISOString()); + message.toTime !== undefined && (obj.toTime = message.toTime.toISOString()); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + message.alwaysNextPageToken !== undefined && + (obj.alwaysNextPageToken = message.alwaysNextPageToken); + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterLogsRequest { + const message = { ...baseListClusterLogsRequest } as ListClusterLogsRequest; + message.clusterId = object.clusterId ?? ""; + message.columnFilter = object.columnFilter?.map((e) => e) || []; + message.serviceType = object.serviceType ?? 0; + message.fromTime = object.fromTime ?? undefined; + message.toTime = object.toTime ?? undefined; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + message.alwaysNextPageToken = object.alwaysNextPageToken ?? false; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(ListClusterLogsRequest.$type, ListClusterLogsRequest); + +const baseListClusterBackupsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest", + clusterId: "", + pageSize: 0, + pageToken: "", +}; + +export const ListClusterBackupsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsRequest" as const, + + encode( + message: ListClusterBackupsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? 
String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsRequest { + const message = { + ...baseListClusterBackupsRequest, + } as ListClusterBackupsRequest; + message.clusterId = object.clusterId ?? ""; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsRequest.$type, + ListClusterBackupsRequest +); + +const baseListClusterBackupsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse", + nextPageToken: "", +}; + +export const ListClusterBackupsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListClusterBackupsResponse" as const, + + encode( + message: ListClusterBackupsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.backups) { + Backup.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListClusterBackupsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backups.push(Backup.decode(reader, reader.uint32())); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = (object.backups ?? []).map((e: any) => + Backup.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListClusterBackupsResponse): unknown { + const obj: any = {}; + if (message.backups) { + obj.backups = message.backups.map((e) => + e ? Backup.toJSON(e) : undefined + ); + } else { + obj.backups = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListClusterBackupsResponse { + const message = { + ...baseListClusterBackupsResponse, + } as ListClusterBackupsResponse; + message.backups = object.backups?.map((e) => Backup.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListClusterBackupsResponse.$type, + ListClusterBackupsResponse +); + +const baseRestoreClusterRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest", + backupId: "", + folderId: "", + name: "", + description: "", + environment: 0, + networkId: "", + securityGroupIds: "", + deletionProtection: false, + hostGroupIds: "", + placementGroupId: "", +}; + +export const RestoreClusterRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest" as const, + + encode( + message: RestoreClusterRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.backupId !== "") { + writer.uint32(10).string(message.backupId); + } + if (message.folderId !== "") { + writer.uint32(18).string(message.folderId); + } + if (message.name !== "") { + writer.uint32(26).string(message.name); + } + if (message.description !== "") { + writer.uint32(34).string(message.description); + } + Object.entries(message.labels).forEach(([key, value]) => { + RestoreClusterRequest_LabelsEntry.encode( + { + $type: + "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry", + key: key as any, + value, + }, + writer.uint32(42).fork() + ).ldelim(); + }); + if (message.environment !== 0) { + writer.uint32(48).int32(message.environment); + } + if (message.config !== undefined) { + GreenplumRestoreConfig.encode( + message.config, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.masterResources !== undefined) { + Resources.encode( + message.masterResources, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.segmentResources !== undefined) { + Resources.encode( + message.segmentResources, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.networkId !== "") { + writer.uint32(82).string(message.networkId); + } + for (const v of message.securityGroupIds) { + writer.uint32(90).string(v!); + } + if (message.deletionProtection === true) { + 
writer.uint32(96).bool(message.deletionProtection); + } + for (const v of message.hostGroupIds) { + writer.uint32(106).string(v!); + } + if (message.placementGroupId !== "") { + writer.uint32(114).string(message.placementGroupId); + } + if (message.maintenanceWindow !== undefined) { + MaintenanceWindow.encode( + message.maintenanceWindow, + writer.uint32(122).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.labels = {}; + message.securityGroupIds = []; + message.hostGroupIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backupId = reader.string(); + break; + case 2: + message.folderId = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.description = reader.string(); + break; + case 5: + const entry5 = RestoreClusterRequest_LabelsEntry.decode( + reader, + reader.uint32() + ); + if (entry5.value !== undefined) { + message.labels[entry5.key] = entry5.value; + } + break; + case 6: + message.environment = reader.int32() as any; + break; + case 7: + message.config = GreenplumRestoreConfig.decode( + reader, + reader.uint32() + ); + break; + case 8: + message.masterResources = Resources.decode(reader, reader.uint32()); + break; + case 9: + message.segmentResources = Resources.decode(reader, reader.uint32()); + break; + case 10: + message.networkId = reader.string(); + break; + case 11: + message.securityGroupIds.push(reader.string()); + break; + case 12: + message.deletionProtection = reader.bool(); + break; + case 13: + message.hostGroupIds.push(reader.string()); + break; + case 14: + message.placementGroupId = reader.string(); + break; + case 
15: + message.maintenanceWindow = MaintenanceWindow.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? String(object.backupId) + : ""; + message.folderId = + object.folderId !== undefined && object.folderId !== null + ? String(object.folderId) + : ""; + message.name = + object.name !== undefined && object.name !== null + ? String(object.name) + : ""; + message.description = + object.description !== undefined && object.description !== null + ? String(object.description) + : ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + acc[key] = String(value); + return acc; + }, {}); + message.environment = + object.environment !== undefined && object.environment !== null + ? cluster_EnvironmentFromJSON(object.environment) + : 0; + message.config = + object.config !== undefined && object.config !== null + ? GreenplumRestoreConfig.fromJSON(object.config) + : undefined; + message.masterResources = + object.masterResources !== undefined && object.masterResources !== null + ? Resources.fromJSON(object.masterResources) + : undefined; + message.segmentResources = + object.segmentResources !== undefined && object.segmentResources !== null + ? Resources.fromJSON(object.segmentResources) + : undefined; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.securityGroupIds = (object.securityGroupIds ?? []).map((e: any) => + String(e) + ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.hostGroupIds = (object.hostGroupIds ?? 
[]).map((e: any) => + String(e) + ); + message.placementGroupId = + object.placementGroupId !== undefined && object.placementGroupId !== null + ? String(object.placementGroupId) + : ""; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromJSON(object.maintenanceWindow) + : undefined; + return message; + }, + + toJSON(message: RestoreClusterRequest): unknown { + const obj: any = {}; + message.backupId !== undefined && (obj.backupId = message.backupId); + message.folderId !== undefined && (obj.folderId = message.folderId); + message.name !== undefined && (obj.name = message.name); + message.description !== undefined && + (obj.description = message.description); + obj.labels = {}; + if (message.labels) { + Object.entries(message.labels).forEach(([k, v]) => { + obj.labels[k] = v; + }); + } + message.environment !== undefined && + (obj.environment = cluster_EnvironmentToJSON(message.environment)); + message.config !== undefined && + (obj.config = message.config + ? GreenplumRestoreConfig.toJSON(message.config) + : undefined); + message.masterResources !== undefined && + (obj.masterResources = message.masterResources + ? Resources.toJSON(message.masterResources) + : undefined); + message.segmentResources !== undefined && + (obj.segmentResources = message.segmentResources + ? 
Resources.toJSON(message.segmentResources) + : undefined); + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.securityGroupIds) { + obj.securityGroupIds = message.securityGroupIds.map((e) => e); + } else { + obj.securityGroupIds = []; + } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } + message.placementGroupId !== undefined && + (obj.placementGroupId = message.placementGroupId); + message.maintenanceWindow !== undefined && + (obj.maintenanceWindow = message.maintenanceWindow + ? MaintenanceWindow.toJSON(message.maintenanceWindow) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterRequest { + const message = { ...baseRestoreClusterRequest } as RestoreClusterRequest; + message.backupId = object.backupId ?? ""; + message.folderId = object.folderId ?? ""; + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.labels = Object.entries(object.labels ?? {}).reduce<{ + [key: string]: string; + }>((acc, [key, value]) => { + if (value !== undefined) { + acc[key] = String(value); + } + return acc; + }, {}); + message.environment = object.environment ?? 0; + message.config = + object.config !== undefined && object.config !== null + ? GreenplumRestoreConfig.fromPartial(object.config) + : undefined; + message.masterResources = + object.masterResources !== undefined && object.masterResources !== null + ? Resources.fromPartial(object.masterResources) + : undefined; + message.segmentResources = + object.segmentResources !== undefined && object.segmentResources !== null + ? Resources.fromPartial(object.segmentResources) + : undefined; + message.networkId = object.networkId ?? 
""; + message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; + message.placementGroupId = object.placementGroupId ?? ""; + message.maintenanceWindow = + object.maintenanceWindow !== undefined && + object.maintenanceWindow !== null + ? MaintenanceWindow.fromPartial(object.maintenanceWindow) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterRequest.$type, RestoreClusterRequest); + +const baseRestoreClusterRequest_LabelsEntry: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry", + key: "", + value: "", +}; + +export const RestoreClusterRequest_LabelsEntry = { + $type: + "yandex.cloud.mdb.greenplum.v1.RestoreClusterRequest.LabelsEntry" as const, + + encode( + message: RestoreClusterRequest_LabelsEntry, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterRequest_LabelsEntry { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.value = + object.value !== undefined && object.value !== null + ? String(object.value) + : ""; + return message; + }, + + toJSON(message: RestoreClusterRequest_LabelsEntry): unknown { + const obj: any = {}; + message.key !== undefined && (obj.key = message.key); + message.value !== undefined && (obj.value = message.value); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): RestoreClusterRequest_LabelsEntry { + const message = { + ...baseRestoreClusterRequest_LabelsEntry, + } as RestoreClusterRequest_LabelsEntry; + message.key = object.key ?? ""; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + RestoreClusterRequest_LabelsEntry.$type, + RestoreClusterRequest_LabelsEntry +); + +const baseRestoreClusterMetadata: object = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterMetadata", + clusterId: "", + backupId: "", +}; + +export const RestoreClusterMetadata = { + $type: "yandex.cloud.mdb.greenplum.v1.RestoreClusterMetadata" as const, + + encode( + message: RestoreClusterMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + if (message.backupId !== "") { + writer.uint32(18).string(message.backupId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): RestoreClusterMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.backupId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.backupId = + object.backupId !== undefined && object.backupId !== null + ? 
String(object.backupId) + : ""; + return message; + }, + + toJSON(message: RestoreClusterMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + message.backupId !== undefined && (obj.backupId = message.backupId); + return obj; + }, + + fromPartial, I>>( + object: I + ): RestoreClusterMetadata { + const message = { ...baseRestoreClusterMetadata } as RestoreClusterMetadata; + message.clusterId = object.clusterId ?? ""; + message.backupId = object.backupId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(RestoreClusterMetadata.$type, RestoreClusterMetadata); + +/** A set of methods for managing Greenplum® clusters. */ +export const ClusterServiceService = { + /** + * Returns the specified Greenplum® cluster. + * + * To get the list of available Greenplum® clusters, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetClusterRequest) => + Buffer.from(GetClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => GetClusterRequest.decode(value), + responseSerialize: (value: Cluster) => + Buffer.from(Cluster.encode(value).finish()), + responseDeserialize: (value: Buffer) => Cluster.decode(value), + }, + /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ + list: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClustersRequest) => + Buffer.from(ListClustersRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClustersRequest.decode(value), + responseSerialize: (value: ListClustersResponse) => + Buffer.from(ListClustersResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), + }, + /** Creates a Greenplum® cluster in the specified folder. 
*/ + create: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Create", + requestStream: false, responseStream: false, requestSerialize: (value: CreateClusterRequest) => Buffer.from(CreateClusterRequest.encode(value).finish()), @@ -2515,7 +3966,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Updates the specified Greenplum cluster. */ + /** Updates the specified Greenplum® cluster. */ update: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Update", requestStream: false, @@ -2527,7 +3978,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified Greenplum cluster. */ + /** Deletes the specified Greenplum® cluster. */ delete: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Delete", requestStream: false, @@ -2539,7 +3990,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Starts the specified Greenplum cluster. */ + /** Starts the specified Greenplum® cluster. */ start: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Start", requestStream: false, @@ -2551,7 +4002,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Stops the specified Greenplum cluster. */ + /** Stops the specified Greenplum® cluster. */ stop: { path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Stop", requestStream: false, @@ -2605,29 +4056,65 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterHostsResponse.decode(value), }, + /** Retrieves logs for the specified Greenplum® cluster. 
*/ + listLogs: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/ListLogs", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterLogsRequest) => + Buffer.from(ListClusterLogsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => ListClusterLogsRequest.decode(value), + responseSerialize: (value: ListClusterLogsResponse) => + Buffer.from(ListClusterLogsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterLogsResponse.decode(value), + }, + /** Retrieves the list of available backups for the specified Greenplum cluster. */ + listBackups: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/ListBackups", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListClusterBackupsRequest) => + Buffer.from(ListClusterBackupsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListClusterBackupsRequest.decode(value), + responseSerialize: (value: ListClusterBackupsResponse) => + Buffer.from(ListClusterBackupsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListClusterBackupsResponse.decode(value), + }, + /** Creates a new Greenplum® cluster using the specified backup. */ + restore: { + path: "/yandex.cloud.mdb.greenplum.v1.ClusterService/Restore", + requestStream: false, + responseStream: false, + requestSerialize: (value: RestoreClusterRequest) => + Buffer.from(RestoreClusterRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => RestoreClusterRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, } as const; export interface ClusterServiceServer extends UntypedServiceImplementation { /** - * Returns the specified Greenplum cluster. + * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum clusters, make a [List] request. 
+ * To get the list of available Greenplum® clusters, make a [List] request. */ get: handleUnaryCall; - /** - * Retrieves a list of Greenplum clusters that belong - * to the specified folder. - */ + /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ list: handleUnaryCall; - /** Creates a Greenplum cluster in the specified folder. */ + /** Creates a Greenplum® cluster in the specified folder. */ create: handleUnaryCall; - /** Updates the specified Greenplum cluster. */ + /** Updates the specified Greenplum® cluster. */ update: handleUnaryCall; - /** Deletes the specified Greenplum cluster. */ + /** Deletes the specified Greenplum® cluster. */ delete: handleUnaryCall; - /** Starts the specified Greenplum cluster. */ + /** Starts the specified Greenplum® cluster. */ start: handleUnaryCall; - /** Stops the specified Greenplum cluster. */ + /** Stops the specified Greenplum® cluster. */ stop: handleUnaryCall; /** Retrieves the list of Operation resources for the specified cluster. */ listOperations: handleUnaryCall< @@ -2644,13 +4131,22 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { ListClusterHostsRequest, ListClusterHostsResponse >; + /** Retrieves logs for the specified Greenplum® cluster. */ + listLogs: handleUnaryCall; + /** Retrieves the list of available backups for the specified Greenplum cluster. */ + listBackups: handleUnaryCall< + ListClusterBackupsRequest, + ListClusterBackupsResponse + >; + /** Creates a new Greenplum® cluster using the specified backup. */ + restore: handleUnaryCall; } export interface ClusterServiceClient extends Client { /** - * Returns the specified Greenplum cluster. + * Returns the specified Greenplum® cluster. * - * To get the list of available Greenplum clusters, make a [List] request. + * To get the list of available Greenplum® clusters, make a [List] request. 
*/ get( request: GetClusterRequest, @@ -2667,10 +4163,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Cluster) => void ): ClientUnaryCall; - /** - * Retrieves a list of Greenplum clusters that belong - * to the specified folder. - */ + /** Retrieves a list of Greenplum® clusters that belong to the specified folder. */ list( request: ListClustersRequest, callback: ( @@ -2695,7 +4188,7 @@ export interface ClusterServiceClient extends Client { response: ListClustersResponse ) => void ): ClientUnaryCall; - /** Creates a Greenplum cluster in the specified folder. */ + /** Creates a Greenplum® cluster in the specified folder. */ create( request: CreateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2711,7 +4204,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Updates the specified Greenplum cluster. */ + /** Updates the specified Greenplum® cluster. */ update( request: UpdateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2727,7 +4220,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified Greenplum cluster. */ + /** Deletes the specified Greenplum® cluster. */ delete( request: DeleteClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2743,7 +4236,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Starts the specified Greenplum cluster. */ + /** Starts the specified Greenplum® cluster. 
*/ start( request: StartClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2759,7 +4252,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Stops the specified Greenplum cluster. */ + /** Stops the specified Greenplum® cluster. */ stop( request: StopClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -2850,6 +4343,72 @@ export interface ClusterServiceClient extends Client { response: ListClusterHostsResponse ) => void ): ClientUnaryCall; + /** Retrieves logs for the specified Greenplum® cluster. */ + listLogs( + request: ListClusterLogsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + listLogs( + request: ListClusterLogsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + listLogs( + request: ListClusterLogsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterLogsResponse + ) => void + ): ClientUnaryCall; + /** Retrieves the list of available backups for the specified Greenplum cluster. */ + listBackups( + request: ListClusterBackupsRequest, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + listBackups( + request: ListClusterBackupsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListClusterBackupsResponse + ) => void + ): ClientUnaryCall; + /** Creates a new Greenplum® cluster using the specified backup. 
*/ + restore( + request: RestoreClusterRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + restore( + request: RestoreClusterRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; } export const ClusterServiceClient = makeGenericClientConstructor( @@ -2902,6 +4461,28 @@ export type Exact = P extends Builtin never >; +function toTimestamp(date: Date): Timestamp { + const seconds = date.getTime() / 1_000; + const nanos = (date.getTime() % 1_000) * 1_000_000; + return { $type: "google.protobuf.Timestamp", seconds, nanos }; +} + +function fromTimestamp(t: Timestamp): Date { + let millis = t.seconds * 1_000; + millis += t.nanos / 1_000_000; + return new Date(millis); +} + +function fromJsonTimestamp(o: any): Date { + if (o instanceof Date) { + return o; + } else if (typeof o === "string") { + return new Date(o); + } else { + return fromTimestamp(Timestamp.fromJSON(o)); + } +} + function longToNumber(long: Long): number { if (long.gt(Number.MAX_SAFE_INTEGER)) { throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts index f938d1cb..8a25ddfc 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/config.ts @@ -2,61 +2,47 @@ import { messageTypeRegistry } from "../../../../../typeRegistry"; import Long from "long"; import _m0 from "protobufjs/minimal"; -import { - Int64Value, - StringValue, - BoolValue, - FloatValue, -} from "../../../../../google/protobuf/wrappers"; +import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = 
"yandex.cloud.mdb.greenplum.v1"; +/** A list of computational resources allocated to a host. */ export interface Resources { $type: "yandex.cloud.mdb.greenplum.v1.Resources"; /** - * ID of the preset for computational resources available to a host (CPU, memory etc.). - * All available presets are listed in the [documentation](/docs/managed-greenplum/concepts/instance-types). + * ID of the preset for computational resources allocated to a host. + * Available presets are listed in the [documentation](/docs/managed-greenplum/concepts/instance-types). */ resourcePresetId: string; - /** Volume of the storage available to a host. */ + /** Volume of the storage used by the host, in bytes. */ diskSize: number; - /** - * Type of the storage environment for the host. - * - * Possible values: - * * network-hdd - network HDD drive, - * * network-ssd - network SSD drive, - * * local-ssd - local SSD storage. - */ + /** Type of the storage used by the host: `network-hdd`, `network-ssd` or `local-ssd`. */ diskTypeId: string; } +/** Route server configuration. */ export interface ConnectionPoolerConfig { $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig"; - /** - * Odyssey route server pool mode. Default is session mode. - * https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string - */ + /** Route server pool mode. */ mode: ConnectionPoolerConfig_PoolMode; /** - * Odyssey Server pool size. - * Keep the number of servers in the pool as much as 'pool_size'. Clients are put in a wait queue, when all servers are busy. + * The number of servers in the server pool. Clients are placed in a wait queue when all servers are busy. * Set to zero to disable the limit. - * https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_size-integer */ size?: number; /** - * Server pool idle timeout. - * Close an server connection when it becomes idle for 'pool_ttl' seconds. - * Set to zero to disable. 
- * https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_ttl-integer + * Server pool idle timeout, in seconds. A server connection closes after it has been idle for the specified duration. + * Set to zero to disable the limit. */ clientIdleTimeout?: number; } +/** Route server pool mode. */ export enum ConnectionPoolerConfig_PoolMode { POOL_MODE_UNSPECIFIED = 0, + /** SESSION - Assign server connection to a client until it disconnects. Default value. */ SESSION = 1, + /** TRANSACTION - Assign server connection to a client for a transaction processing. */ TRANSACTION = 2, UNRECOGNIZED = -1, } @@ -96,296 +82,65 @@ export function connectionPoolerConfig_PoolModeToJSON( } } -/** Configuration of master subcluster */ +/** Configuration of the master subcluster. */ export interface MasterSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig"; - /** Resources allocated to Greenplum master subcluster hosts. */ + /** Computational resources allocated to Greenplum® master subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum master server. */ - config?: GreenplumMasterConfigSet; } -/** Configuration of segmet subcluster */ +/** Configuration of the segment subcluster. */ export interface SegmentSubclusterConfig { $type: "yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig"; - /** Resources allocated to Greenplum segment subcluster hosts. */ + /** Computational resources allocated to Greenplum® segment subcluster hosts. */ resources?: Resources; - /** Configuration settings of a Greenplum segment server. */ - config?: GreenplumSegmentConfigSet; } -/** - * Greenplum master subcluster configuration options. Detailed description for each set of options - * - * Any options not listed here are not supported. - */ -export interface GreenplumMasterConfig { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfig"; - /** Logging level for the Greenplum master subcluster. 
Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR. */ - logLevel: GreenplumMasterConfig_LogLevel; - /** Maximum number of inbound connections. */ +export interface Greenplumconfig617 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17"; + /** Maximum number of inbound connections on master segment */ maxConnections?: number; - /** The server's time zone to be used in DateTime fields conversions. Specified as an IANA identifier. */ - timezone?: string; - /** Odyssey pool settings */ - pool?: ConnectionPoolerConfig; - /** - * Sets the maximum number of transactions that can be in the "prepared" state simultaneously - * https://www.postgresql.org/docs/9.6/runtime-config-resource.html - */ - maxPreparedTransactions?: number; - /** - * For queries that are managed by resource queues or resource groups, - * this parameter determines when Greenplum Database terminates running queries based on the amount of memory the queries are using. - * A value of 100 disables the automatic termination of queries based on the percentage of memory that is utilized. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#runaway_detector_activation_percent - */ - runawayDetectorActivationPercent?: number; - /** - * How many keepalives may be lost before the connection is considered dead. A value of 0 uses the system default. - * If TCP_KEEPCNT is not supported, this parameter must be 0. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#tcp_keepalives_count - */ - tcpKeepalivesCount?: number; - /** - * How many seconds to wait for a response to a keepalive before retransmitting. A value of 0 uses the system default. - * If TCP_KEEPINTVL is not supported, this parameter must be 0. 
- * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#tcp_keepalives_interval - */ - tcpKeepalivesInterval?: number; - /** - * When an SQL query reads from an external table, the parameter value specifies the amount of time in seconds that - * Greenplum Database waits before cancelling the query when data stops being returned from the external table. - * The default value of 0, specifies no time out. Greenplum Database does not cancel the query. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#readable_external_table_timeout - */ - readableExternalTableTimeout?: number; - /** - * Sets the amount of data per-peer to be queued by the default UDPIFC interconnect on senders. - * Increasing the depth from its default value will cause the system to use more memory, but may increase performance. - * Reasonable values for this parameter are between 1 and 4. Increasing the value might radically increase the amount of memory used by the system. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_interconnect_snd_queue_depth - */ - gpInterconnectSndQueueDepth?: number; - /** - * Sets the amount of data per-peer to be queued by the Greenplum Database interconnect on receivers - * (when data is received but no space is available to receive it the data will be dropped, and the transmitter will need to resend it) - * for the default UDPIFC interconnect. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_interconnect_queue_depth - */ - gpInterconnectQueueDepth?: number; /** - * Controls which SQL statements are logged. DDL logs all data definition commands like CREATE, ALTER, and DROP commands. - * MOD logs all DDL statements, plus INSERT, UPDATE, DELETE, TRUNCATE, and COPY FROM. - * PREPARE and EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. 
- * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_statement - * Default value is ddl + * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. + * https://www.postgresql.org/docs/current/runtime-config-replication.html */ - logStatement: GreenplumMasterConfig_LogStatement; + maxSlotWalKeepSize?: number; /** - * Causes the duration of every completed statement which satisfies log_statement to be logged. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#log_duration + * Sets the maximum total disk size that all running queries are allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_segment */ - logDuration?: boolean; + gpWorkfileLimitPerSegment?: number; /** - * For a partitioned table, controls whether the ROOTPARTITION keyword is required to collect root partition statistics - * when the ANALYZE command is run on the table. GPORCA uses the root partition statistics when generating a query plan. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#optimizer_analyze_root_partition + * Sets the maximum disk size an individual query is allowed to use for creating temporary spill files at each segment. + * The default value is 0, which means a limit is not enforced. + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_per_query */ - optimizerAnalyzeRootPartition?: boolean; + gpWorkfileLimitPerQuery?: number; /** - * Sets the number of segments that will scan external table data during an external table operation, - * the purpose being not to overload the system with scanning data and take away resources from other concurrent operations. 
- * This only applies to external tables that use the gpfdist:// protocol to access external table data. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_external_max_segs + * Sets the maximum number of temporary spill files (also known as workfiles) allowed per query per segment. + * Spill files are created when executing a query that requires more memory than it is allocated. + * The current query is terminated when the limit is exceeded. + * Set the value to 0 (zero) to allow an unlimited number of spill files. master session reload + * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_limit_files_per_query + * Default value is 10000 */ - gpExternalMaxSegs?: number; + gpWorkfileLimitFilesPerQuery?: number; /** - * Specifies the allowed timeout for the fault detection process (ftsprobe) to establish a connection to a segment before declaring it down. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_fts_probe_timeout + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html */ - gpFtsProbeTimeout?: number; + maxPreparedTransactions?: number; /** * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. 
* https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression */ gpWorkfileCompression?: boolean; - /** https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_autostats_mode_in_functions */ - gpAutostatsModeInFunctions: GreenplumMasterConfig_AutostatsModeInFunctions; -} - -export enum GreenplumMasterConfig_LogLevel { - LOG_LEVEL_UNSPECIFIED = 0, - TRACE = 1, - DEBUG = 2, - INFORMATION = 3, - WARNING = 4, - ERROR = 5, - UNRECOGNIZED = -1, -} - -export function greenplumMasterConfig_LogLevelFromJSON( - object: any -): GreenplumMasterConfig_LogLevel { - switch (object) { - case 0: - case "LOG_LEVEL_UNSPECIFIED": - return GreenplumMasterConfig_LogLevel.LOG_LEVEL_UNSPECIFIED; - case 1: - case "TRACE": - return GreenplumMasterConfig_LogLevel.TRACE; - case 2: - case "DEBUG": - return GreenplumMasterConfig_LogLevel.DEBUG; - case 3: - case "INFORMATION": - return GreenplumMasterConfig_LogLevel.INFORMATION; - case 4: - case "WARNING": - return GreenplumMasterConfig_LogLevel.WARNING; - case 5: - case "ERROR": - return GreenplumMasterConfig_LogLevel.ERROR; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumMasterConfig_LogLevel.UNRECOGNIZED; - } -} - -export function greenplumMasterConfig_LogLevelToJSON( - object: GreenplumMasterConfig_LogLevel -): string { - switch (object) { - case GreenplumMasterConfig_LogLevel.LOG_LEVEL_UNSPECIFIED: - return "LOG_LEVEL_UNSPECIFIED"; - case GreenplumMasterConfig_LogLevel.TRACE: - return "TRACE"; - case GreenplumMasterConfig_LogLevel.DEBUG: - return "DEBUG"; - case GreenplumMasterConfig_LogLevel.INFORMATION: - return "INFORMATION"; - case GreenplumMasterConfig_LogLevel.WARNING: - return "WARNING"; - case GreenplumMasterConfig_LogLevel.ERROR: - return "ERROR"; - default: - return "UNKNOWN"; - } -} - -export enum GreenplumMasterConfig_LogStatement { - LOG_STATEMENT_UNSPECIFIED = 0, - NONE = 1, - DDL = 2, - MOD = 3, - ALL = 4, - UNRECOGNIZED = -1, -} - -export function 
greenplumMasterConfig_LogStatementFromJSON( - object: any -): GreenplumMasterConfig_LogStatement { - switch (object) { - case 0: - case "LOG_STATEMENT_UNSPECIFIED": - return GreenplumMasterConfig_LogStatement.LOG_STATEMENT_UNSPECIFIED; - case 1: - case "NONE": - return GreenplumMasterConfig_LogStatement.NONE; - case 2: - case "DDL": - return GreenplumMasterConfig_LogStatement.DDL; - case 3: - case "MOD": - return GreenplumMasterConfig_LogStatement.MOD; - case 4: - case "ALL": - return GreenplumMasterConfig_LogStatement.ALL; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumMasterConfig_LogStatement.UNRECOGNIZED; - } -} - -export function greenplumMasterConfig_LogStatementToJSON( - object: GreenplumMasterConfig_LogStatement -): string { - switch (object) { - case GreenplumMasterConfig_LogStatement.LOG_STATEMENT_UNSPECIFIED: - return "LOG_STATEMENT_UNSPECIFIED"; - case GreenplumMasterConfig_LogStatement.NONE: - return "NONE"; - case GreenplumMasterConfig_LogStatement.DDL: - return "DDL"; - case GreenplumMasterConfig_LogStatement.MOD: - return "MOD"; - case GreenplumMasterConfig_LogStatement.ALL: - return "ALL"; - default: - return "UNKNOWN"; - } -} - -export enum GreenplumMasterConfig_AutostatsModeInFunctions { - AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED = 0, - MODE_NONE = 1, - ON_CHANGE = 2, - ON_NO_STATS = 3, - UNRECOGNIZED = -1, -} - -export function greenplumMasterConfig_AutostatsModeInFunctionsFromJSON( - object: any -): GreenplumMasterConfig_AutostatsModeInFunctions { - switch (object) { - case 0: - case "AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED": - return GreenplumMasterConfig_AutostatsModeInFunctions.AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED; - case 1: - case "MODE_NONE": - return GreenplumMasterConfig_AutostatsModeInFunctions.MODE_NONE; - case 2: - case "ON_CHANGE": - return GreenplumMasterConfig_AutostatsModeInFunctions.ON_CHANGE; - case 3: - case "ON_NO_STATS": - return GreenplumMasterConfig_AutostatsModeInFunctions.ON_NO_STATS; - case -1: - case 
"UNRECOGNIZED": - default: - return GreenplumMasterConfig_AutostatsModeInFunctions.UNRECOGNIZED; - } -} - -export function greenplumMasterConfig_AutostatsModeInFunctionsToJSON( - object: GreenplumMasterConfig_AutostatsModeInFunctions -): string { - switch (object) { - case GreenplumMasterConfig_AutostatsModeInFunctions.AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED: - return "AUTOSTATS_MODE_IN_FUNCTIONS_UNSPECIFIED"; - case GreenplumMasterConfig_AutostatsModeInFunctions.MODE_NONE: - return "MODE_NONE"; - case GreenplumMasterConfig_AutostatsModeInFunctions.ON_CHANGE: - return "ON_CHANGE"; - case GreenplumMasterConfig_AutostatsModeInFunctions.ON_NO_STATS: - return "ON_NO_STATS"; - default: - return "UNKNOWN"; - } } -/** - * Greenplum segment subcluster configuration options. Detailed description for each set of options - * - * Any options not listed here are not supported. - */ -export interface GreenplumSegmentConfig { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfig"; - /** Logging level for the Greenplum segment subcluster. Possible values: TRACE, DEBUG, INFORMATION, WARNING, ERROR. */ - logLevel: GreenplumSegmentConfig_LogLevel; - /** Maximum number of inbound connections. */ +export interface Greenplumconfig619 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19"; + /** Maximum number of inbound connections on master segment */ maxConnections?: number; /** * Specify the maximum size of WAL files that replication slots are allowed to retain in the pg_wal directory at checkpoint time. @@ -414,151 +169,54 @@ export interface GreenplumSegmentConfig { */ gpWorkfileLimitFilesPerQuery?: number; /** - * Identifies the resource management scheme currently enabled in the Greenplum Database cluster. The default scheme is to use resource queues. 
- * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_resource_manager - * "group" is the default value - */ - gpResourceManager: GreenplumSegmentConfig_GPResourceManager; - /** - * Identifies the maximum percentage of system CPU resources to allocate to resource groups on each Greenplum Database segment node. - * Note: The gp_resource_group_cpu_limit server configuration parameter is enforced only when resource group-based resource management is active. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_resource_group_cpu_limit + * Sets the maximum number of transactions that can be in the "prepared" state simultaneously + * https://www.postgresql.org/docs/9.6/runtime-config-resource.html */ - gpResourceGroupCpuLimit?: number; + maxPreparedTransactions?: number; /** - * Identifies the maximum percentage of system memory resources to allocate to resource groups on each Greenplum Database segment node. - * Note: The gp_resource_group_memory_limit server configuration parameter is enforced only when resource group-based resource management is active. - * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_resource_group_memory_limit + * Specifies whether the temporary files created, when a hash aggregation or hash join operation spills to disk, are compressed. 
+ * https://docs.greenplum.org/6-5/ref_guide/config_params/guc-list.html#gp_workfile_compression */ - gpResourceGroupMemoryLimit?: number; -} - -export enum GreenplumSegmentConfig_LogLevel { - LOG_LEVEL_UNSPECIFIED = 0, - TRACE = 1, - DEBUG = 2, - INFORMATION = 3, - WARNING = 4, - ERROR = 5, - UNRECOGNIZED = -1, -} - -export function greenplumSegmentConfig_LogLevelFromJSON( - object: any -): GreenplumSegmentConfig_LogLevel { - switch (object) { - case 0: - case "LOG_LEVEL_UNSPECIFIED": - return GreenplumSegmentConfig_LogLevel.LOG_LEVEL_UNSPECIFIED; - case 1: - case "TRACE": - return GreenplumSegmentConfig_LogLevel.TRACE; - case 2: - case "DEBUG": - return GreenplumSegmentConfig_LogLevel.DEBUG; - case 3: - case "INFORMATION": - return GreenplumSegmentConfig_LogLevel.INFORMATION; - case 4: - case "WARNING": - return GreenplumSegmentConfig_LogLevel.WARNING; - case 5: - case "ERROR": - return GreenplumSegmentConfig_LogLevel.ERROR; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumSegmentConfig_LogLevel.UNRECOGNIZED; - } -} - -export function greenplumSegmentConfig_LogLevelToJSON( - object: GreenplumSegmentConfig_LogLevel -): string { - switch (object) { - case GreenplumSegmentConfig_LogLevel.LOG_LEVEL_UNSPECIFIED: - return "LOG_LEVEL_UNSPECIFIED"; - case GreenplumSegmentConfig_LogLevel.TRACE: - return "TRACE"; - case GreenplumSegmentConfig_LogLevel.DEBUG: - return "DEBUG"; - case GreenplumSegmentConfig_LogLevel.INFORMATION: - return "INFORMATION"; - case GreenplumSegmentConfig_LogLevel.WARNING: - return "WARNING"; - case GreenplumSegmentConfig_LogLevel.ERROR: - return "ERROR"; - default: - return "UNKNOWN"; - } -} - -export enum GreenplumSegmentConfig_GPResourceManager { - GP_RESOURCE_MANAGER_UNSPECIFIED = 0, - QUEUE = 1, - GROUP = 2, - UNRECOGNIZED = -1, -} - -export function greenplumSegmentConfig_GPResourceManagerFromJSON( - object: any -): GreenplumSegmentConfig_GPResourceManager { - switch (object) { - case 0: - case "GP_RESOURCE_MANAGER_UNSPECIFIED": 
- return GreenplumSegmentConfig_GPResourceManager.GP_RESOURCE_MANAGER_UNSPECIFIED; - case 1: - case "QUEUE": - return GreenplumSegmentConfig_GPResourceManager.QUEUE; - case 2: - case "GROUP": - return GreenplumSegmentConfig_GPResourceManager.GROUP; - case -1: - case "UNRECOGNIZED": - default: - return GreenplumSegmentConfig_GPResourceManager.UNRECOGNIZED; - } + gpWorkfileCompression?: boolean; } -export function greenplumSegmentConfig_GPResourceManagerToJSON( - object: GreenplumSegmentConfig_GPResourceManager -): string { - switch (object) { - case GreenplumSegmentConfig_GPResourceManager.GP_RESOURCE_MANAGER_UNSPECIFIED: - return "GP_RESOURCE_MANAGER_UNSPECIFIED"; - case GreenplumSegmentConfig_GPResourceManager.QUEUE: - return "QUEUE"; - case GreenplumSegmentConfig_GPResourceManager.GROUP: - return "GROUP"; - default: - return "UNKNOWN"; - } +export interface Greenplumconfigset617 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17"; + /** + * Effective settings for a Greenplum (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Greenplumconfig617; + /** User-defined settings for a Greenplum. */ + userConfig?: Greenplumconfig617; + /** Default configuration for a Greenplum. */ + defaultConfig?: Greenplumconfig617; } -export interface GreenplumMasterConfigSet { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfigSet"; +export interface Greenplumconfigset619 { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19"; /** - * Effective settings for a Greenplum master subcluster (a combination of settings defined + * Effective settings for a Greenplum (a combination of settings defined * in [user_config] and [default_config]). */ - effectiveConfig?: GreenplumMasterConfig; - /** User-defined settings for a Greenplum master subcluster. */ - userConfig?: GreenplumMasterConfig; - /** Default configuration for a Greenplum master subcluster. 
*/ - defaultConfig?: GreenplumMasterConfig; + effectiveConfig?: Greenplumconfig619; + /** User-defined settings for a Greenplum. */ + userConfig?: Greenplumconfig619; + /** Default configuration for a Greenplum. */ + defaultConfig?: Greenplumconfig619; } -export interface GreenplumSegmentConfigSet { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfigSet"; +export interface ConnectionPoolerConfigSet { + $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet"; /** - * Effective settings for a Greenplum segment subcluster (a combination of settings defined + * Effective settings for a odyssey (a combination of settings defined * in [user_config] and [default_config]). */ - effectiveConfig?: GreenplumSegmentConfig; - /** User-defined settings for a Greenplum segment subcluster. */ - userConfig?: GreenplumSegmentConfig; - /** Default configuration for a Greenplum segment subcluster. */ - defaultConfig?: GreenplumSegmentConfig; + effectiveConfig?: ConnectionPoolerConfig; + /** User-defined settings for a odyssey. */ + userConfig?: ConnectionPoolerConfig; + /** Default configuration for a odyssey. */ + defaultConfig?: ConnectionPoolerConfig; } const baseResources: object = { @@ -769,12 +427,6 @@ export const MasterSubclusterConfig = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumMasterConfigSet.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -791,12 +443,6 @@ export const MasterSubclusterConfig = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumMasterConfigSet.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -811,10 +457,6 @@ export const MasterSubclusterConfig = { object.resources !== undefined && object.resources !== null ? 
Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumMasterConfigSet.fromJSON(object.config) - : undefined; return message; }, @@ -824,10 +466,6 @@ export const MasterSubclusterConfig = { (obj.resources = message.resources ? Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumMasterConfigSet.toJSON(message.config) - : undefined); return obj; }, @@ -839,10 +477,6 @@ export const MasterSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumMasterConfigSet.fromPartial(object.config) - : undefined; return message; }, }; @@ -863,12 +497,6 @@ export const SegmentSubclusterConfig = { if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); } - if (message.config !== undefined) { - GreenplumSegmentConfigSet.encode( - message.config, - writer.uint32(18).fork() - ).ldelim(); - } return writer; }, @@ -887,12 +515,6 @@ export const SegmentSubclusterConfig = { case 1: message.resources = Resources.decode(reader, reader.uint32()); break; - case 2: - message.config = GreenplumSegmentConfigSet.decode( - reader, - reader.uint32() - ); - break; default: reader.skipType(tag & 7); break; @@ -909,10 +531,6 @@ export const SegmentSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromJSON(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfigSet.fromJSON(object.config) - : undefined; return message; }, @@ -922,10 +540,6 @@ export const SegmentSubclusterConfig = { (obj.resources = message.resources ? 
Resources.toJSON(message.resources) : undefined); - message.config !== undefined && - (obj.config = message.config - ? GreenplumSegmentConfigSet.toJSON(message.config) - : undefined); return obj; }, @@ -939,148 +553,72 @@ export const SegmentSubclusterConfig = { object.resources !== undefined && object.resources !== null ? Resources.fromPartial(object.resources) : undefined; - message.config = - object.config !== undefined && object.config !== null - ? GreenplumSegmentConfigSet.fromPartial(object.config) - : undefined; return message; }, }; messageTypeRegistry.set(SegmentSubclusterConfig.$type, SegmentSubclusterConfig); -const baseGreenplumMasterConfig: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfig", - logLevel: 0, - logStatement: 0, - gpAutostatsModeInFunctions: 0, +const baseGreenplumconfig617: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17", }; -export const GreenplumMasterConfig = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfig" as const, +export const Greenplumconfig617 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17" as const, encode( - message: GreenplumMasterConfig, + message: Greenplumconfig617, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.logLevel !== 0) { - writer.uint32(8).int32(message.logLevel); - } if (message.maxConnections !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", value: message.maxConnections! }, - writer.uint32(18).fork() - ).ldelim(); - } - if (message.timezone !== undefined) { - StringValue.encode( - { $type: "google.protobuf.StringValue", value: message.timezone! 
}, - writer.uint32(26).fork() - ).ldelim(); - } - if (message.pool !== undefined) { - ConnectionPoolerConfig.encode( - message.pool, - writer.uint32(34).fork() - ).ldelim(); - } - if (message.maxPreparedTransactions !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.maxPreparedTransactions!, - }, - writer.uint32(106).fork() - ).ldelim(); - } - if (message.runawayDetectorActivationPercent !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.runawayDetectorActivationPercent!, - }, - writer.uint32(114).fork() - ).ldelim(); - } - if (message.tcpKeepalivesCount !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.tcpKeepalivesCount!, - }, - writer.uint32(122).fork() - ).ldelim(); - } - if (message.tcpKeepalivesInterval !== undefined) { - Int64Value.encode( - { - $type: "google.protobuf.Int64Value", - value: message.tcpKeepalivesInterval!, - }, - writer.uint32(130).fork() + writer.uint32(10).fork() ).ldelim(); } - if (message.readableExternalTableTimeout !== undefined) { + if (message.maxSlotWalKeepSize !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.readableExternalTableTimeout!, + value: message.maxSlotWalKeepSize!, }, - writer.uint32(154).fork() + writer.uint32(18).fork() ).ldelim(); } - if (message.gpInterconnectSndQueueDepth !== undefined) { + if (message.gpWorkfileLimitPerSegment !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpInterconnectSndQueueDepth!, + value: message.gpWorkfileLimitPerSegment!, }, - writer.uint32(162).fork() + writer.uint32(26).fork() ).ldelim(); } - if (message.gpInterconnectQueueDepth !== undefined) { + if (message.gpWorkfileLimitPerQuery !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpInterconnectQueueDepth!, - }, - writer.uint32(170).fork() - ).ldelim(); - } - if 
(message.logStatement !== 0) { - writer.uint32(176).int32(message.logStatement); - } - if (message.logDuration !== undefined) { - BoolValue.encode( - { $type: "google.protobuf.BoolValue", value: message.logDuration! }, - writer.uint32(186).fork() - ).ldelim(); - } - if (message.optimizerAnalyzeRootPartition !== undefined) { - BoolValue.encode( - { - $type: "google.protobuf.BoolValue", - value: message.optimizerAnalyzeRootPartition!, + value: message.gpWorkfileLimitPerQuery!, }, - writer.uint32(194).fork() + writer.uint32(34).fork() ).ldelim(); } - if (message.gpExternalMaxSegs !== undefined) { + if (message.gpWorkfileLimitFilesPerQuery !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpExternalMaxSegs!, + value: message.gpWorkfileLimitFilesPerQuery!, }, - writer.uint32(202).fork() + writer.uint32(42).fork() ).ldelim(); } - if (message.gpFtsProbeTimeout !== undefined) { + if (message.maxPreparedTransactions !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", - value: message.gpFtsProbeTimeout!, + value: message.maxPreparedTransactions!, }, - writer.uint32(210).fork() + writer.uint32(50).fork() ).ldelim(); } if (message.gpWorkfileCompression !== undefined) { @@ -1089,115 +627,61 @@ export const GreenplumMasterConfig = { $type: "google.protobuf.BoolValue", value: message.gpWorkfileCompression!, }, - writer.uint32(218).fork() + writer.uint32(58).fork() ).ldelim(); } - if (message.gpAutostatsModeInFunctions !== 0) { - writer.uint32(224).int32(message.gpAutostatsModeInFunctions); - } return writer; }, - decode( - input: _m0.Reader | Uint8Array, - length?: number - ): GreenplumMasterConfig { + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig617 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseGreenplumMasterConfig } as GreenplumMasterConfig; + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.logLevel = reader.int32() as any; - break; - case 2: message.maxConnections = Int64Value.decode( reader, reader.uint32() ).value; break; - case 3: - message.timezone = StringValue.decode(reader, reader.uint32()).value; - break; - case 4: - message.pool = ConnectionPoolerConfig.decode(reader, reader.uint32()); - break; - case 13: - message.maxPreparedTransactions = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 14: - message.runawayDetectorActivationPercent = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 15: - message.tcpKeepalivesCount = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 16: - message.tcpKeepalivesInterval = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 19: - message.readableExternalTableTimeout = Int64Value.decode( - reader, - reader.uint32() - ).value; - break; - case 20: - message.gpInterconnectSndQueueDepth = Int64Value.decode( + case 2: + message.maxSlotWalKeepSize = Int64Value.decode( reader, reader.uint32() ).value; break; - case 21: - message.gpInterconnectQueueDepth = Int64Value.decode( + case 3: + message.gpWorkfileLimitPerSegment = Int64Value.decode( reader, reader.uint32() ).value; break; - case 22: - message.logStatement = reader.int32() as any; - break; - case 23: - message.logDuration = BoolValue.decode(reader, reader.uint32()).value; - break; - case 24: - message.optimizerAnalyzeRootPartition = BoolValue.decode( + case 4: + message.gpWorkfileLimitPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 25: - message.gpExternalMaxSegs = Int64Value.decode( + case 5: + message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( reader, 
reader.uint32() ).value; break; - case 26: - message.gpFtsProbeTimeout = Int64Value.decode( + case 6: + message.maxPreparedTransactions = Int64Value.decode( reader, reader.uint32() ).value; break; - case 27: + case 7: message.gpWorkfileCompression = BoolValue.decode( reader, reader.uint32() ).value; break; - case 28: - message.gpAutostatsModeInFunctions = reader.int32() as any; - break; default: reader.skipType(tag & 7); break; @@ -1206,203 +690,100 @@ export const GreenplumMasterConfig = { return message; }, - fromJSON(object: any): GreenplumMasterConfig { - const message = { ...baseGreenplumMasterConfig } as GreenplumMasterConfig; - message.logLevel = - object.logLevel !== undefined && object.logLevel !== null - ? greenplumMasterConfig_LogLevelFromJSON(object.logLevel) - : 0; + fromJSON(object: any): Greenplumconfig617 { + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; message.maxConnections = object.maxConnections !== undefined && object.maxConnections !== null ? Number(object.maxConnections) : undefined; - message.timezone = - object.timezone !== undefined && object.timezone !== null - ? String(object.timezone) + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? Number(object.maxSlotWalKeepSize) + : undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment !== undefined && + object.gpWorkfileLimitPerSegment !== null + ? Number(object.gpWorkfileLimitPerSegment) : undefined; - message.pool = - object.pool !== undefined && object.pool !== null - ? ConnectionPoolerConfig.fromJSON(object.pool) + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery !== undefined && + object.gpWorkfileLimitPerQuery !== null + ? Number(object.gpWorkfileLimitPerQuery) + : undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery !== undefined && + object.gpWorkfileLimitFilesPerQuery !== null + ? 
Number(object.gpWorkfileLimitFilesPerQuery) : undefined; message.maxPreparedTransactions = object.maxPreparedTransactions !== undefined && object.maxPreparedTransactions !== null ? Number(object.maxPreparedTransactions) : undefined; - message.runawayDetectorActivationPercent = - object.runawayDetectorActivationPercent !== undefined && - object.runawayDetectorActivationPercent !== null - ? Number(object.runawayDetectorActivationPercent) - : undefined; - message.tcpKeepalivesCount = - object.tcpKeepalivesCount !== undefined && - object.tcpKeepalivesCount !== null - ? Number(object.tcpKeepalivesCount) - : undefined; - message.tcpKeepalivesInterval = - object.tcpKeepalivesInterval !== undefined && - object.tcpKeepalivesInterval !== null - ? Number(object.tcpKeepalivesInterval) - : undefined; - message.readableExternalTableTimeout = - object.readableExternalTableTimeout !== undefined && - object.readableExternalTableTimeout !== null - ? Number(object.readableExternalTableTimeout) - : undefined; - message.gpInterconnectSndQueueDepth = - object.gpInterconnectSndQueueDepth !== undefined && - object.gpInterconnectSndQueueDepth !== null - ? Number(object.gpInterconnectSndQueueDepth) - : undefined; - message.gpInterconnectQueueDepth = - object.gpInterconnectQueueDepth !== undefined && - object.gpInterconnectQueueDepth !== null - ? Number(object.gpInterconnectQueueDepth) - : undefined; - message.logStatement = - object.logStatement !== undefined && object.logStatement !== null - ? greenplumMasterConfig_LogStatementFromJSON(object.logStatement) - : 0; - message.logDuration = - object.logDuration !== undefined && object.logDuration !== null - ? Boolean(object.logDuration) - : undefined; - message.optimizerAnalyzeRootPartition = - object.optimizerAnalyzeRootPartition !== undefined && - object.optimizerAnalyzeRootPartition !== null - ? 
Boolean(object.optimizerAnalyzeRootPartition) - : undefined; - message.gpExternalMaxSegs = - object.gpExternalMaxSegs !== undefined && - object.gpExternalMaxSegs !== null - ? Number(object.gpExternalMaxSegs) - : undefined; - message.gpFtsProbeTimeout = - object.gpFtsProbeTimeout !== undefined && - object.gpFtsProbeTimeout !== null - ? Number(object.gpFtsProbeTimeout) - : undefined; message.gpWorkfileCompression = object.gpWorkfileCompression !== undefined && object.gpWorkfileCompression !== null ? Boolean(object.gpWorkfileCompression) : undefined; - message.gpAutostatsModeInFunctions = - object.gpAutostatsModeInFunctions !== undefined && - object.gpAutostatsModeInFunctions !== null - ? greenplumMasterConfig_AutostatsModeInFunctionsFromJSON( - object.gpAutostatsModeInFunctions - ) - : 0; return message; }, - toJSON(message: GreenplumMasterConfig): unknown { + toJSON(message: Greenplumconfig617): unknown { const obj: any = {}; - message.logLevel !== undefined && - (obj.logLevel = greenplumMasterConfig_LogLevelToJSON(message.logLevel)); message.maxConnections !== undefined && (obj.maxConnections = message.maxConnections); - message.timezone !== undefined && (obj.timezone = message.timezone); - message.pool !== undefined && - (obj.pool = message.pool - ? 
ConnectionPoolerConfig.toJSON(message.pool) - : undefined); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.gpWorkfileLimitPerSegment !== undefined && + (obj.gpWorkfileLimitPerSegment = message.gpWorkfileLimitPerSegment); + message.gpWorkfileLimitPerQuery !== undefined && + (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); + message.gpWorkfileLimitFilesPerQuery !== undefined && + (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); message.maxPreparedTransactions !== undefined && (obj.maxPreparedTransactions = message.maxPreparedTransactions); - message.runawayDetectorActivationPercent !== undefined && - (obj.runawayDetectorActivationPercent = - message.runawayDetectorActivationPercent); - message.tcpKeepalivesCount !== undefined && - (obj.tcpKeepalivesCount = message.tcpKeepalivesCount); - message.tcpKeepalivesInterval !== undefined && - (obj.tcpKeepalivesInterval = message.tcpKeepalivesInterval); - message.readableExternalTableTimeout !== undefined && - (obj.readableExternalTableTimeout = message.readableExternalTableTimeout); - message.gpInterconnectSndQueueDepth !== undefined && - (obj.gpInterconnectSndQueueDepth = message.gpInterconnectSndQueueDepth); - message.gpInterconnectQueueDepth !== undefined && - (obj.gpInterconnectQueueDepth = message.gpInterconnectQueueDepth); - message.logStatement !== undefined && - (obj.logStatement = greenplumMasterConfig_LogStatementToJSON( - message.logStatement - )); - message.logDuration !== undefined && - (obj.logDuration = message.logDuration); - message.optimizerAnalyzeRootPartition !== undefined && - (obj.optimizerAnalyzeRootPartition = - message.optimizerAnalyzeRootPartition); - message.gpExternalMaxSegs !== undefined && - (obj.gpExternalMaxSegs = message.gpExternalMaxSegs); - message.gpFtsProbeTimeout !== undefined && - (obj.gpFtsProbeTimeout = message.gpFtsProbeTimeout); message.gpWorkfileCompression !== undefined && 
(obj.gpWorkfileCompression = message.gpWorkfileCompression); - message.gpAutostatsModeInFunctions !== undefined && - (obj.gpAutostatsModeInFunctions = - greenplumMasterConfig_AutostatsModeInFunctionsToJSON( - message.gpAutostatsModeInFunctions - )); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumMasterConfig { - const message = { ...baseGreenplumMasterConfig } as GreenplumMasterConfig; - message.logLevel = object.logLevel ?? 0; + ): Greenplumconfig617 { + const message = { ...baseGreenplumconfig617 } as Greenplumconfig617; message.maxConnections = object.maxConnections ?? undefined; - message.timezone = object.timezone ?? undefined; - message.pool = - object.pool !== undefined && object.pool !== null - ? ConnectionPoolerConfig.fromPartial(object.pool) - : undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.gpWorkfileLimitPerSegment = + object.gpWorkfileLimitPerSegment ?? undefined; + message.gpWorkfileLimitPerQuery = + object.gpWorkfileLimitPerQuery ?? undefined; + message.gpWorkfileLimitFilesPerQuery = + object.gpWorkfileLimitFilesPerQuery ?? undefined; message.maxPreparedTransactions = object.maxPreparedTransactions ?? undefined; - message.runawayDetectorActivationPercent = - object.runawayDetectorActivationPercent ?? undefined; - message.tcpKeepalivesCount = object.tcpKeepalivesCount ?? undefined; - message.tcpKeepalivesInterval = object.tcpKeepalivesInterval ?? undefined; - message.readableExternalTableTimeout = - object.readableExternalTableTimeout ?? undefined; - message.gpInterconnectSndQueueDepth = - object.gpInterconnectSndQueueDepth ?? undefined; - message.gpInterconnectQueueDepth = - object.gpInterconnectQueueDepth ?? undefined; - message.logStatement = object.logStatement ?? 0; - message.logDuration = object.logDuration ?? undefined; - message.optimizerAnalyzeRootPartition = - object.optimizerAnalyzeRootPartition ?? 
undefined; - message.gpExternalMaxSegs = object.gpExternalMaxSegs ?? undefined; - message.gpFtsProbeTimeout = object.gpFtsProbeTimeout ?? undefined; message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; - message.gpAutostatsModeInFunctions = object.gpAutostatsModeInFunctions ?? 0; return message; }, }; -messageTypeRegistry.set(GreenplumMasterConfig.$type, GreenplumMasterConfig); +messageTypeRegistry.set(Greenplumconfig617.$type, Greenplumconfig617); -const baseGreenplumSegmentConfig: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfig", - logLevel: 0, - gpResourceManager: 0, +const baseGreenplumconfig619: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19", }; -export const GreenplumSegmentConfig = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfig" as const, +export const Greenplumconfig619 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19" as const, encode( - message: GreenplumSegmentConfig, + message: Greenplumconfig619, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.logLevel !== 0) { - writer.uint32(8).int32(message.logLevel); - } if (message.maxConnections !== undefined) { Int64Value.encode( { $type: "google.protobuf.Int64Value", value: message.maxConnections! 
}, - writer.uint32(18).fork() + writer.uint32(10).fork() ).ldelim(); } if (message.maxSlotWalKeepSize !== undefined) { @@ -1411,7 +792,7 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.maxSlotWalKeepSize!, }, - writer.uint32(58).fork() + writer.uint32(18).fork() ).ldelim(); } if (message.gpWorkfileLimitPerSegment !== undefined) { @@ -1420,7 +801,7 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.gpWorkfileLimitPerSegment!, }, - writer.uint32(66).fork() + writer.uint32(26).fork() ).ldelim(); } if (message.gpWorkfileLimitPerQuery !== undefined) { @@ -1429,7 +810,7 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.gpWorkfileLimitPerQuery!, }, - writer.uint32(74).fork() + writer.uint32(34).fork() ).ldelim(); } if (message.gpWorkfileLimitFilesPerQuery !== undefined) { @@ -1438,87 +819,75 @@ export const GreenplumSegmentConfig = { $type: "google.protobuf.Int64Value", value: message.gpWorkfileLimitFilesPerQuery!, }, - writer.uint32(82).fork() + writer.uint32(42).fork() ).ldelim(); } - if (message.gpResourceManager !== 0) { - writer.uint32(88).int32(message.gpResourceManager); - } - if (message.gpResourceGroupCpuLimit !== undefined) { - FloatValue.encode( + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( { - $type: "google.protobuf.FloatValue", - value: message.gpResourceGroupCpuLimit!, + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, }, - writer.uint32(138).fork() + writer.uint32(50).fork() ).ldelim(); } - if (message.gpResourceGroupMemoryLimit !== undefined) { - FloatValue.encode( + if (message.gpWorkfileCompression !== undefined) { + BoolValue.encode( { - $type: "google.protobuf.FloatValue", - value: message.gpResourceGroupMemoryLimit!, + $type: "google.protobuf.BoolValue", + value: message.gpWorkfileCompression!, }, - writer.uint32(146).fork() + writer.uint32(58).fork() 
).ldelim(); } return writer; }, - decode( - input: _m0.Reader | Uint8Array, - length?: number - ): GreenplumSegmentConfig { + decode(input: _m0.Reader | Uint8Array, length?: number): Greenplumconfig619 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseGreenplumSegmentConfig } as GreenplumSegmentConfig; + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.logLevel = reader.int32() as any; - break; - case 2: message.maxConnections = Int64Value.decode( reader, reader.uint32() ).value; break; - case 7: + case 2: message.maxSlotWalKeepSize = Int64Value.decode( reader, reader.uint32() ).value; break; - case 8: + case 3: message.gpWorkfileLimitPerSegment = Int64Value.decode( reader, reader.uint32() ).value; break; - case 9: + case 4: message.gpWorkfileLimitPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 10: + case 5: message.gpWorkfileLimitFilesPerQuery = Int64Value.decode( reader, reader.uint32() ).value; break; - case 11: - message.gpResourceManager = reader.int32() as any; - break; - case 17: - message.gpResourceGroupCpuLimit = FloatValue.decode( + case 6: + message.maxPreparedTransactions = Int64Value.decode( reader, reader.uint32() ).value; break; - case 18: - message.gpResourceGroupMemoryLimit = FloatValue.decode( + case 7: + message.gpWorkfileCompression = BoolValue.decode( reader, reader.uint32() ).value; @@ -1531,12 +900,8 @@ export const GreenplumSegmentConfig = { return message; }, - fromJSON(object: any): GreenplumSegmentConfig { - const message = { ...baseGreenplumSegmentConfig } as GreenplumSegmentConfig; - message.logLevel = - object.logLevel !== undefined && object.logLevel !== null - ? 
greenplumSegmentConfig_LogLevelFromJSON(object.logLevel) - : 0; + fromJSON(object: any): Greenplumconfig619 { + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; message.maxConnections = object.maxConnections !== undefined && object.maxConnections !== null ? Number(object.maxConnections) @@ -1561,30 +926,21 @@ export const GreenplumSegmentConfig = { object.gpWorkfileLimitFilesPerQuery !== null ? Number(object.gpWorkfileLimitFilesPerQuery) : undefined; - message.gpResourceManager = - object.gpResourceManager !== undefined && - object.gpResourceManager !== null - ? greenplumSegmentConfig_GPResourceManagerFromJSON( - object.gpResourceManager - ) - : 0; - message.gpResourceGroupCpuLimit = - object.gpResourceGroupCpuLimit !== undefined && - object.gpResourceGroupCpuLimit !== null - ? Number(object.gpResourceGroupCpuLimit) + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) : undefined; - message.gpResourceGroupMemoryLimit = - object.gpResourceGroupMemoryLimit !== undefined && - object.gpResourceGroupMemoryLimit !== null - ? Number(object.gpResourceGroupMemoryLimit) + message.gpWorkfileCompression = + object.gpWorkfileCompression !== undefined && + object.gpWorkfileCompression !== null + ? 
Boolean(object.gpWorkfileCompression) : undefined; return message; }, - toJSON(message: GreenplumSegmentConfig): unknown { + toJSON(message: Greenplumconfig619): unknown { const obj: any = {}; - message.logLevel !== undefined && - (obj.logLevel = greenplumSegmentConfig_LogLevelToJSON(message.logLevel)); message.maxConnections !== undefined && (obj.maxConnections = message.maxConnections); message.maxSlotWalKeepSize !== undefined && @@ -1595,22 +951,17 @@ export const GreenplumSegmentConfig = { (obj.gpWorkfileLimitPerQuery = message.gpWorkfileLimitPerQuery); message.gpWorkfileLimitFilesPerQuery !== undefined && (obj.gpWorkfileLimitFilesPerQuery = message.gpWorkfileLimitFilesPerQuery); - message.gpResourceManager !== undefined && - (obj.gpResourceManager = greenplumSegmentConfig_GPResourceManagerToJSON( - message.gpResourceManager - )); - message.gpResourceGroupCpuLimit !== undefined && - (obj.gpResourceGroupCpuLimit = message.gpResourceGroupCpuLimit); - message.gpResourceGroupMemoryLimit !== undefined && - (obj.gpResourceGroupMemoryLimit = message.gpResourceGroupMemoryLimit); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.gpWorkfileCompression !== undefined && + (obj.gpWorkfileCompression = message.gpWorkfileCompression); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumSegmentConfig { - const message = { ...baseGreenplumSegmentConfig } as GreenplumSegmentConfig; - message.logLevel = object.logLevel ?? 0; + ): Greenplumconfig619 { + const message = { ...baseGreenplumconfig619 } as Greenplumconfig619; message.maxConnections = object.maxConnections ?? undefined; message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; message.gpWorkfileLimitPerSegment = @@ -1619,42 +970,40 @@ export const GreenplumSegmentConfig = { object.gpWorkfileLimitPerQuery ?? undefined; message.gpWorkfileLimitFilesPerQuery = object.gpWorkfileLimitFilesPerQuery ?? 
undefined; - message.gpResourceManager = object.gpResourceManager ?? 0; - message.gpResourceGroupCpuLimit = - object.gpResourceGroupCpuLimit ?? undefined; - message.gpResourceGroupMemoryLimit = - object.gpResourceGroupMemoryLimit ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.gpWorkfileCompression = object.gpWorkfileCompression ?? undefined; return message; }, }; -messageTypeRegistry.set(GreenplumSegmentConfig.$type, GreenplumSegmentConfig); +messageTypeRegistry.set(Greenplumconfig619.$type, Greenplumconfig619); -const baseGreenplumMasterConfigSet: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfigSet", +const baseGreenplumconfigset617: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17", }; -export const GreenplumMasterConfigSet = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumMasterConfigSet" as const, +export const Greenplumconfigset617 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17" as const, encode( - message: GreenplumMasterConfigSet, + message: Greenplumconfigset617, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.effectiveConfig !== undefined) { - GreenplumMasterConfig.encode( + Greenplumconfig617.encode( message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } if (message.userConfig !== undefined) { - GreenplumMasterConfig.encode( + Greenplumconfig617.encode( message.userConfig, writer.uint32(18).fork() ).ldelim(); } if (message.defaultConfig !== undefined) { - GreenplumMasterConfig.encode( + Greenplumconfig617.encode( message.defaultConfig, writer.uint32(26).fork() ).ldelim(); @@ -1665,29 +1014,27 @@ export const GreenplumMasterConfigSet = { decode( input: _m0.Reader | Uint8Array, length?: number - ): GreenplumMasterConfigSet { + ): Greenplumconfigset617 { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { - ...baseGreenplumMasterConfigSet, - } as GreenplumMasterConfigSet; + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.effectiveConfig = GreenplumMasterConfig.decode( + message.effectiveConfig = Greenplumconfig617.decode( reader, reader.uint32() ); break; case 2: - message.userConfig = GreenplumMasterConfig.decode( + message.userConfig = Greenplumconfig617.decode( reader, reader.uint32() ); break; case 3: - message.defaultConfig = GreenplumMasterConfig.decode( + message.defaultConfig = Greenplumconfig617.decode( reader, reader.uint32() ); @@ -1700,94 +1047,211 @@ export const GreenplumMasterConfigSet = { return message; }, - fromJSON(object: any): GreenplumMasterConfigSet { - const message = { - ...baseGreenplumMasterConfigSet, - } as GreenplumMasterConfigSet; + fromJSON(object: any): Greenplumconfigset617 { + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumMasterConfig.fromJSON(object.effectiveConfig) + ? Greenplumconfig617.fromJSON(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumMasterConfig.fromJSON(object.userConfig) + ? Greenplumconfig617.fromJSON(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumMasterConfig.fromJSON(object.defaultConfig) + ? Greenplumconfig617.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: GreenplumMasterConfigSet): unknown { + toJSON(message: Greenplumconfigset617): unknown { const obj: any = {}; message.effectiveConfig !== undefined && (obj.effectiveConfig = message.effectiveConfig - ? 
GreenplumMasterConfig.toJSON(message.effectiveConfig) + ? Greenplumconfig617.toJSON(message.effectiveConfig) : undefined); message.userConfig !== undefined && (obj.userConfig = message.userConfig - ? GreenplumMasterConfig.toJSON(message.userConfig) + ? Greenplumconfig617.toJSON(message.userConfig) : undefined); message.defaultConfig !== undefined && (obj.defaultConfig = message.defaultConfig - ? GreenplumMasterConfig.toJSON(message.defaultConfig) + ? Greenplumconfig617.toJSON(message.defaultConfig) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumMasterConfigSet { - const message = { - ...baseGreenplumMasterConfigSet, - } as GreenplumMasterConfigSet; + ): Greenplumconfigset617 { + const message = { ...baseGreenplumconfigset617 } as Greenplumconfigset617; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumMasterConfig.fromPartial(object.effectiveConfig) + ? Greenplumconfig617.fromPartial(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumMasterConfig.fromPartial(object.userConfig) + ? Greenplumconfig617.fromPartial(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumMasterConfig.fromPartial(object.defaultConfig) + ? 
Greenplumconfig617.fromPartial(object.defaultConfig) : undefined; return message; }, }; -messageTypeRegistry.set( - GreenplumMasterConfigSet.$type, - GreenplumMasterConfigSet -); +messageTypeRegistry.set(Greenplumconfigset617.$type, Greenplumconfigset617); + +const baseGreenplumconfigset619: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19", +}; + +export const Greenplumconfigset619 = { + $type: "yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19" as const, + + encode( + message: Greenplumconfigset619, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Greenplumconfig619.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Greenplumconfig619.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Greenplumconfig619.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Greenplumconfigset619 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Greenplumconfig619.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Greenplumconfigset619 { + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig619.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig619.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig619.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Greenplumconfigset619): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Greenplumconfig619.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Greenplumconfig619.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Greenplumconfig619.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Greenplumconfigset619 { + const message = { ...baseGreenplumconfigset619 } as Greenplumconfigset619; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Greenplumconfig619.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Greenplumconfig619.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Greenplumconfig619.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Greenplumconfigset619.$type, Greenplumconfigset619); -const baseGreenplumSegmentConfigSet: object = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfigSet", +const baseConnectionPoolerConfigSet: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet", }; -export const GreenplumSegmentConfigSet = { - $type: "yandex.cloud.mdb.greenplum.v1.GreenplumSegmentConfigSet" as const, +export const ConnectionPoolerConfigSet = { + $type: "yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet" as const, encode( - message: GreenplumSegmentConfigSet, + message: ConnectionPoolerConfigSet, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.effectiveConfig !== undefined) { - GreenplumSegmentConfig.encode( + ConnectionPoolerConfig.encode( message.effectiveConfig, writer.uint32(10).fork() ).ldelim(); } if (message.userConfig !== undefined) { - GreenplumSegmentConfig.encode( + ConnectionPoolerConfig.encode( message.userConfig, writer.uint32(18).fork() ).ldelim(); } if (message.defaultConfig !== undefined) { - GreenplumSegmentConfig.encode( + ConnectionPoolerConfig.encode( message.defaultConfig, writer.uint32(26).fork() ).ldelim(); @@ -1798,29 +1262,29 @@ export const 
GreenplumSegmentConfigSet = { decode( input: _m0.Reader | Uint8Array, length?: number - ): GreenplumSegmentConfigSet { + ): ConnectionPoolerConfigSet { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { - ...baseGreenplumSegmentConfigSet, - } as GreenplumSegmentConfigSet; + ...baseConnectionPoolerConfigSet, + } as ConnectionPoolerConfigSet; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.effectiveConfig = GreenplumSegmentConfig.decode( + message.effectiveConfig = ConnectionPoolerConfig.decode( reader, reader.uint32() ); break; case 2: - message.userConfig = GreenplumSegmentConfig.decode( + message.userConfig = ConnectionPoolerConfig.decode( reader, reader.uint32() ); break; case 3: - message.defaultConfig = GreenplumSegmentConfig.decode( + message.defaultConfig = ConnectionPoolerConfig.decode( reader, reader.uint32() ); @@ -1833,67 +1297,67 @@ export const GreenplumSegmentConfigSet = { return message; }, - fromJSON(object: any): GreenplumSegmentConfigSet { + fromJSON(object: any): ConnectionPoolerConfigSet { const message = { - ...baseGreenplumSegmentConfigSet, - } as GreenplumSegmentConfigSet; + ...baseConnectionPoolerConfigSet, + } as ConnectionPoolerConfigSet; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumSegmentConfig.fromJSON(object.effectiveConfig) + ? ConnectionPoolerConfig.fromJSON(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumSegmentConfig.fromJSON(object.userConfig) + ? ConnectionPoolerConfig.fromJSON(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumSegmentConfig.fromJSON(object.defaultConfig) + ? 
ConnectionPoolerConfig.fromJSON(object.defaultConfig) : undefined; return message; }, - toJSON(message: GreenplumSegmentConfigSet): unknown { + toJSON(message: ConnectionPoolerConfigSet): unknown { const obj: any = {}; message.effectiveConfig !== undefined && (obj.effectiveConfig = message.effectiveConfig - ? GreenplumSegmentConfig.toJSON(message.effectiveConfig) + ? ConnectionPoolerConfig.toJSON(message.effectiveConfig) : undefined); message.userConfig !== undefined && (obj.userConfig = message.userConfig - ? GreenplumSegmentConfig.toJSON(message.userConfig) + ? ConnectionPoolerConfig.toJSON(message.userConfig) : undefined); message.defaultConfig !== undefined && (obj.defaultConfig = message.defaultConfig - ? GreenplumSegmentConfig.toJSON(message.defaultConfig) + ? ConnectionPoolerConfig.toJSON(message.defaultConfig) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): GreenplumSegmentConfigSet { + ): ConnectionPoolerConfigSet { const message = { - ...baseGreenplumSegmentConfigSet, - } as GreenplumSegmentConfigSet; + ...baseConnectionPoolerConfigSet, + } as ConnectionPoolerConfigSet; message.effectiveConfig = object.effectiveConfig !== undefined && object.effectiveConfig !== null - ? GreenplumSegmentConfig.fromPartial(object.effectiveConfig) + ? ConnectionPoolerConfig.fromPartial(object.effectiveConfig) : undefined; message.userConfig = object.userConfig !== undefined && object.userConfig !== null - ? GreenplumSegmentConfig.fromPartial(object.userConfig) + ? ConnectionPoolerConfig.fromPartial(object.userConfig) : undefined; message.defaultConfig = object.defaultConfig !== undefined && object.defaultConfig !== null - ? GreenplumSegmentConfig.fromPartial(object.defaultConfig) + ? 
ConnectionPoolerConfig.fromPartial(object.defaultConfig) : undefined; return message; }, }; messageTypeRegistry.set( - GreenplumSegmentConfigSet.$type, - GreenplumSegmentConfigSet + ConnectionPoolerConfigSet.$type, + ConnectionPoolerConfigSet ); declare var self: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts index 5b8432c0..183b66de 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/host.ts @@ -6,38 +6,40 @@ import { Resources } from "../../../../../yandex/cloud/mdb/greenplum/v1/config"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; +/** A Greenplum® cluster host resource. */ export interface Host { $type: "yandex.cloud.mdb.greenplum.v1.Host"; /** - * Name of the Greenplum host. The host name is assigned by MDB at creation time, and cannot be changed. + * Name of the Greenplum® host. The host name is assigned by Yandex Cloud at creation time and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; - /** ID of the Greenplum cluster. The ID is assigned by MDB at creation time. */ + /** ID of the Greenplum® cluster. The ID is assigned by Yandex Cloud at creation time. */ clusterId: string; - /** ID of the availability zone where the Greenplum host resides. */ + /** ID of the availability zone the Greenplum® host belongs to. */ zoneId: string; /** Type of the host. */ type: Host_Type; - /** Resources allocated to the Greenplum host. */ + /** Resources allocated to the Greenplum® host. */ resources?: Resources; /** Status code of the aggregated health of the host. */ health: Host_Health; /** ID of the subnet that the host belongs to. 
*/ subnetId: string; - /** Flag showing public IP assignment status to this host. */ + /** Whether or not a public IP is assigned to the host. */ assignPublicIp: boolean; } export enum Host_Type { + /** TYPE_UNSPECIFIED - The type is not specified. */ TYPE_UNSPECIFIED = 0, - /** MASTER - Greenplum master host. */ + /** MASTER - A Greenplum® master host. */ MASTER = 1, - /** REPLICA - Greenplum master host. */ + /** REPLICA - A Greenplum® master replica host. */ REPLICA = 2, - /** SEGMENT - Greenplum segment host. */ + /** SEGMENT - A Greenplum® segment host. */ SEGMENT = 3, UNRECOGNIZED = -1, } @@ -83,11 +85,11 @@ export enum Host_Health { UNKNOWN = 0, /** ALIVE - The host is performing all its functions normally. */ ALIVE = 1, - /** DEAD - The host is inoperable, and cannot perform any of its essential functions. */ + /** DEAD - The host is inoperable and cannot perform any of its essential functions. */ DEAD = 2, /** DEGRADED - The host is working below capacity or not fully functional. */ DEGRADED = 3, - /** UNBALANCED - One or more segments are not in prefer role. */ + /** UNBALANCED - One or more segments are not in preferred role. */ UNBALANCED = 4, UNRECOGNIZED = -1, } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts index 2c23e7da..41b0a76c 100644 --- a/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/maintenance.ts @@ -6,20 +6,26 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; +/** A Greenplum® cluster maintenance window. Should be defined by either one of the two options. */ export interface MaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.MaintenanceWindow"; + /** An any-time maintenance window. */ anytime?: AnytimeMaintenanceWindow | undefined; + /** A weekly maintenance window. 
*/ weeklyMaintenanceWindow?: WeeklyMaintenanceWindow | undefined; } +/** An any-time maintenance window. */ export interface AnytimeMaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.AnytimeMaintenanceWindow"; } +/** A weekly maintenance window. */ export interface WeeklyMaintenanceWindow { $type: "yandex.cloud.mdb.greenplum.v1.WeeklyMaintenanceWindow"; + /** Day of the week. */ day: WeeklyMaintenanceWindow_WeekDay; - /** Hour of the day in UTC. */ + /** Hour of the day in the UTC timezone. */ hour: number; } @@ -95,9 +101,12 @@ export function weeklyMaintenanceWindow_WeekDayToJSON( } } +/** The operation to perform during maintenance. */ export interface MaintenanceOperation { $type: "yandex.cloud.mdb.greenplum.v1.MaintenanceOperation"; + /** The description of the operation, 1-256 characters long. */ info: string; + /** Delay time for the maintenance operation. */ delayedUntil?: Date; } diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts new file mode 100644 index 00000000..e4bf3175 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset.ts @@ -0,0 +1,291 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +/** A preset of resources for hardware configuration of Greenplum hosts. */ +export interface ResourcePreset { + $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset"; + /** ID of the resource preset. */ + id: string; + /** IDs of availability zones where the resource preset is available. */ + zoneIds: string[]; + /** Number of CPU cores for a Greenplum host created with the preset. */ + cores: number; + /** RAM volume for a Greenplum host created with the preset, in bytes. 
*/ + memory: number; + /** Host type */ + type: ResourcePreset_Type; + /** Min host count */ + minHostCount: number; + /** Max host count */ + maxHostCount: number; + /** The number of hosts must be divisible by host_count_divider */ + hostCountDivider: number; + /** Max segment count in host (actual only for segment host) */ + maxSegmentInHostCount: number; +} + +export enum ResourcePreset_Type { + TYPE_UNSPECIFIED = 0, + /** MASTER - Greenplum master host. */ + MASTER = 1, + /** SEGMENT - Greenplum segment host. */ + SEGMENT = 2, + UNRECOGNIZED = -1, +} + +export function resourcePreset_TypeFromJSON(object: any): ResourcePreset_Type { + switch (object) { + case 0: + case "TYPE_UNSPECIFIED": + return ResourcePreset_Type.TYPE_UNSPECIFIED; + case 1: + case "MASTER": + return ResourcePreset_Type.MASTER; + case 2: + case "SEGMENT": + return ResourcePreset_Type.SEGMENT; + case -1: + case "UNRECOGNIZED": + default: + return ResourcePreset_Type.UNRECOGNIZED; + } +} + +export function resourcePreset_TypeToJSON(object: ResourcePreset_Type): string { + switch (object) { + case ResourcePreset_Type.TYPE_UNSPECIFIED: + return "TYPE_UNSPECIFIED"; + case ResourcePreset_Type.MASTER: + return "MASTER"; + case ResourcePreset_Type.SEGMENT: + return "SEGMENT"; + default: + return "UNKNOWN"; + } +} + +const baseResourcePreset: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset", + id: "", + zoneIds: "", + cores: 0, + memory: 0, + type: 0, + minHostCount: 0, + maxHostCount: 0, + hostCountDivider: 0, + maxSegmentInHostCount: 0, +}; + +export const ResourcePreset = { + $type: "yandex.cloud.mdb.greenplum.v1.ResourcePreset" as const, + + encode( + message: ResourcePreset, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + for (const v of message.zoneIds) { + writer.uint32(18).string(v!); + } + if (message.cores !== 0) { + writer.uint32(24).int64(message.cores); + } + if (message.memory !== 0) 
{ + writer.uint32(32).int64(message.memory); + } + if (message.type !== 0) { + writer.uint32(40).int32(message.type); + } + if (message.minHostCount !== 0) { + writer.uint32(48).int64(message.minHostCount); + } + if (message.maxHostCount !== 0) { + writer.uint32(56).int64(message.maxHostCount); + } + if (message.hostCountDivider !== 0) { + writer.uint32(64).int64(message.hostCountDivider); + } + if (message.maxSegmentInHostCount !== 0) { + writer.uint32(72).int64(message.maxSegmentInHostCount); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ResourcePreset { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseResourcePreset } as ResourcePreset; + message.zoneIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.zoneIds.push(reader.string()); + break; + case 3: + message.cores = longToNumber(reader.int64() as Long); + break; + case 4: + message.memory = longToNumber(reader.int64() as Long); + break; + case 5: + message.type = reader.int32() as any; + break; + case 6: + message.minHostCount = longToNumber(reader.int64() as Long); + break; + case 7: + message.maxHostCount = longToNumber(reader.int64() as Long); + break; + case 8: + message.hostCountDivider = longToNumber(reader.int64() as Long); + break; + case 9: + message.maxSegmentInHostCount = longToNumber(reader.int64() as Long); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ResourcePreset { + const message = { ...baseResourcePreset } as ResourcePreset; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.zoneIds = (object.zoneIds ?? 
[]).map((e: any) => String(e)); + message.cores = + object.cores !== undefined && object.cores !== null + ? Number(object.cores) + : 0; + message.memory = + object.memory !== undefined && object.memory !== null + ? Number(object.memory) + : 0; + message.type = + object.type !== undefined && object.type !== null + ? resourcePreset_TypeFromJSON(object.type) + : 0; + message.minHostCount = + object.minHostCount !== undefined && object.minHostCount !== null + ? Number(object.minHostCount) + : 0; + message.maxHostCount = + object.maxHostCount !== undefined && object.maxHostCount !== null + ? Number(object.maxHostCount) + : 0; + message.hostCountDivider = + object.hostCountDivider !== undefined && object.hostCountDivider !== null + ? Number(object.hostCountDivider) + : 0; + message.maxSegmentInHostCount = + object.maxSegmentInHostCount !== undefined && + object.maxSegmentInHostCount !== null + ? Number(object.maxSegmentInHostCount) + : 0; + return message; + }, + + toJSON(message: ResourcePreset): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + if (message.zoneIds) { + obj.zoneIds = message.zoneIds.map((e) => e); + } else { + obj.zoneIds = []; + } + message.cores !== undefined && (obj.cores = Math.round(message.cores)); + message.memory !== undefined && (obj.memory = Math.round(message.memory)); + message.type !== undefined && + (obj.type = resourcePreset_TypeToJSON(message.type)); + message.minHostCount !== undefined && + (obj.minHostCount = Math.round(message.minHostCount)); + message.maxHostCount !== undefined && + (obj.maxHostCount = Math.round(message.maxHostCount)); + message.hostCountDivider !== undefined && + (obj.hostCountDivider = Math.round(message.hostCountDivider)); + message.maxSegmentInHostCount !== undefined && + (obj.maxSegmentInHostCount = Math.round(message.maxSegmentInHostCount)); + return obj; + }, + + fromPartial, I>>( + object: I + ): ResourcePreset { + const message = { ...baseResourcePreset } as 
ResourcePreset; + message.id = object.id ?? ""; + message.zoneIds = object.zoneIds?.map((e) => e) || []; + message.cores = object.cores ?? 0; + message.memory = object.memory ?? 0; + message.type = object.type ?? 0; + message.minHostCount = object.minHostCount ?? 0; + message.maxHostCount = object.maxHostCount ?? 0; + message.hostCountDivider = object.hostCountDivider ?? 0; + message.maxSegmentInHostCount = object.maxSegmentInHostCount ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ResourcePreset.$type, ResourcePreset); + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts new file mode 100644 index 00000000..ee362f83 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/greenplum/v1/resource_preset_service.ts @@ -0,0 +1,475 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../typeRegistry"; +import Long from "long"; +import { + makeGenericClientConstructor, + ChannelCredentials, + ChannelOptions, + UntypedServiceImplementation, + handleUnaryCall, + Client, + ClientUnaryCall, + Metadata, + CallOptions, + ServiceError, +} from "@grpc/grpc-js"; +import _m0 from "protobufjs/minimal"; +import { ResourcePreset } from "../../../../../yandex/cloud/mdb/greenplum/v1/resource_preset"; + +export const protobufPackage = "yandex.cloud.mdb.greenplum.v1"; + +export interface GetResourcePresetRequest { + $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest"; + /** + * Required. ID of the resource preset to return. + * To get the resource preset ID, use a [ResourcePresetService.List] request. + */ + resourcePresetId: string; +} + +export interface ListResourcePresetsRequest { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest"; + /** + * The maximum number of results per page to return. If the number of available + * results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] + * that can be used to get the next page of results in subsequent list requests. + */ + pageSize: number; + /** + * Page token. 
To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] + * returned by a previous list request. + */ + pageToken: string; +} + +export interface ListResourcePresetsResponse { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsResponse"; + /** List of resource presets. */ + resourcePresets: ResourcePreset[]; + /** + * This token allows you to get the next page of results for list requests. If the number of results + * is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value + * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent + * list request will have its own [next_page_token] to continue paging through the results. + */ + nextPageToken: string; +} + +const baseGetResourcePresetRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest", + resourcePresetId: "", +}; + +export const GetResourcePresetRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.GetResourcePresetRequest" as const, + + encode( + message: GetResourcePresetRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.resourcePresetId !== "") { + writer.uint32(10).string(message.resourcePresetId); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): GetResourcePresetRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresetId = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): GetResourcePresetRequest { + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + message.resourcePresetId = + object.resourcePresetId !== undefined && object.resourcePresetId !== null + ? String(object.resourcePresetId) + : ""; + return message; + }, + + toJSON(message: GetResourcePresetRequest): unknown { + const obj: any = {}; + message.resourcePresetId !== undefined && + (obj.resourcePresetId = message.resourcePresetId); + return obj; + }, + + fromPartial, I>>( + object: I + ): GetResourcePresetRequest { + const message = { + ...baseGetResourcePresetRequest, + } as GetResourcePresetRequest; + message.resourcePresetId = object.resourcePresetId ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + GetResourcePresetRequest.$type, + GetResourcePresetRequest +); + +const baseListResourcePresetsRequest: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest", + pageSize: 0, + pageToken: "", +}; + +export const ListResourcePresetsRequest = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsRequest" as const, + + encode( + message: ListResourcePresetsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.pageSize !== 0) { + writer.uint32(16).int64(message.pageSize); + } + if (message.pageToken !== "") { + writer.uint32(26).string(message.pageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcePresetsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.pageSize = longToNumber(reader.int64() as Long); + break; + case 3: + message.pageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcePresetsRequest { + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + message.pageSize = + object.pageSize !== undefined && object.pageSize !== null + ? Number(object.pageSize) + : 0; + message.pageToken = + object.pageToken !== undefined && object.pageToken !== null + ? String(object.pageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcePresetsRequest): unknown { + const obj: any = {}; + message.pageSize !== undefined && + (obj.pageSize = Math.round(message.pageSize)); + message.pageToken !== undefined && (obj.pageToken = message.pageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcePresetsRequest { + const message = { + ...baseListResourcePresetsRequest, + } as ListResourcePresetsRequest; + message.pageSize = object.pageSize ?? 0; + message.pageToken = object.pageToken ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + ListResourcePresetsRequest.$type, + ListResourcePresetsRequest +); + +const baseListResourcePresetsResponse: object = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsResponse", + nextPageToken: "", +}; + +export const ListResourcePresetsResponse = { + $type: "yandex.cloud.mdb.greenplum.v1.ListResourcePresetsResponse" as const, + + encode( + message: ListResourcePresetsResponse, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + for (const v of message.resourcePresets) { + ResourcePreset.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.nextPageToken !== "") { + writer.uint32(18).string(message.nextPageToken); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): ListResourcePresetsResponse { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.resourcePresets.push( + ResourcePreset.decode(reader, reader.uint32()) + ); + break; + case 2: + message.nextPageToken = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): ListResourcePresetsResponse { + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = (object.resourcePresets ?? []).map((e: any) => + ResourcePreset.fromJSON(e) + ); + message.nextPageToken = + object.nextPageToken !== undefined && object.nextPageToken !== null + ? 
String(object.nextPageToken) + : ""; + return message; + }, + + toJSON(message: ListResourcePresetsResponse): unknown { + const obj: any = {}; + if (message.resourcePresets) { + obj.resourcePresets = message.resourcePresets.map((e) => + e ? ResourcePreset.toJSON(e) : undefined + ); + } else { + obj.resourcePresets = []; + } + message.nextPageToken !== undefined && + (obj.nextPageToken = message.nextPageToken); + return obj; + }, + + fromPartial, I>>( + object: I + ): ListResourcePresetsResponse { + const message = { + ...baseListResourcePresetsResponse, + } as ListResourcePresetsResponse; + message.resourcePresets = + object.resourcePresets?.map((e) => ResourcePreset.fromPartial(e)) || []; + message.nextPageToken = object.nextPageToken ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + ListResourcePresetsResponse.$type, + ListResourcePresetsResponse +); + +/** A set of methods for managing resource presets. */ +export const ResourcePresetServiceService = { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get: { + path: "/yandex.cloud.mdb.greenplum.v1.ResourcePresetService/Get", + requestStream: false, + responseStream: false, + requestSerialize: (value: GetResourcePresetRequest) => + Buffer.from(GetResourcePresetRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + GetResourcePresetRequest.decode(value), + responseSerialize: (value: ResourcePreset) => + Buffer.from(ResourcePreset.encode(value).finish()), + responseDeserialize: (value: Buffer) => ResourcePreset.decode(value), + }, + /** Retrieves the list of available resource presets. 
*/ + list: { + path: "/yandex.cloud.mdb.greenplum.v1.ResourcePresetService/List", + requestStream: false, + responseStream: false, + requestSerialize: (value: ListResourcePresetsRequest) => + Buffer.from(ListResourcePresetsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + ListResourcePresetsRequest.decode(value), + responseSerialize: (value: ListResourcePresetsResponse) => + Buffer.from(ListResourcePresetsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + ListResourcePresetsResponse.decode(value), + }, +} as const; + +export interface ResourcePresetServiceServer + extends UntypedServiceImplementation { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get: handleUnaryCall; + /** Retrieves the list of available resource presets. */ + list: handleUnaryCall< + ListResourcePresetsRequest, + ListResourcePresetsResponse + >; +} + +export interface ResourcePresetServiceClient extends Client { + /** + * Returns the specified resource preset. + * + * To get the list of available resource presets, make a [List] request. + */ + get( + request: GetResourcePresetRequest, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + get( + request: GetResourcePresetRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + get( + request: GetResourcePresetRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: ResourcePreset) => void + ): ClientUnaryCall; + /** Retrieves the list of available resource presets. 
*/ + list( + request: ListResourcePresetsRequest, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcePresetsRequest, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; + list( + request: ListResourcePresetsRequest, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: ListResourcePresetsResponse + ) => void + ): ClientUnaryCall; +} + +export const ResourcePresetServiceClient = makeGenericClientConstructor( + ResourcePresetServiceService, + "yandex.cloud.mdb.greenplum.v1.ResourcePresetService" +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial + ): ResourcePresetServiceClient; + service: typeof ResourcePresetServiceService; +}; + +declare var self: any | undefined; +declare var window: any | undefined; +declare var global: any | undefined; +var globalThis: any = (() => { + if (typeof globalThis !== "undefined") return globalThis; + if (typeof self !== "undefined") return self; + if (typeof window !== "undefined") return window; + if (typeof global !== "undefined") return global; + throw "Unable to locate global object"; +})(); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +function longToNumber(long: Long): number { + if (long.gt(Number.MAX_SAFE_INTEGER)) { + throw new globalThis.Error("Value is larger than Number.MAX_SAFE_INTEGER"); + } + return long.toNumber(); +} + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/index.ts b/src/generated/yandex/cloud/mdb/index.ts index 90587677..0144703a 100644 --- a/src/generated/yandex/cloud/mdb/index.ts +++ b/src/generated/yandex/cloud/mdb/index.ts @@ -17,18 +17,26 @@ export * as clickhouse_version from './clickhouse/v1/version' export * as clickhouse_versions_service from './clickhouse/v1/versions_service' export * as elasticsearch_auth from './elasticsearch/v1/auth' export * as elasticsearch_auth_service from './elasticsearch/v1/auth_service' +export * as elasticsearch_backup from './elasticsearch/v1/backup' +export * as elasticsearch_backup_service from './elasticsearch/v1/backup_service' export * as elasticsearch_cluster from './elasticsearch/v1/cluster' export * as elasticsearch_cluster_service from './elasticsearch/v1/cluster_service' +export * as elasticsearch_extension from './elasticsearch/v1/extension' +export * as elasticsearch_extension_service from './elasticsearch/v1/extension_service' export * as elasticsearch_maintenance from './elasticsearch/v1/maintenance' export * as elasticsearch_resource_preset from './elasticsearch/v1/resource_preset' export * as elasticsearch_resource_preset_service from './elasticsearch/v1/resource_preset_service' export * as elasticsearch_user from './elasticsearch/v1/user' export * as elasticsearch_user_service from './elasticsearch/v1/user_service' +export * as greenplum_backup from './greenplum/v1/backup' +export * as greenplum_backup_service from './greenplum/v1/backup_service' export * as greenplum_cluster from './greenplum/v1/cluster' export * as greenplum_cluster_service from 
'./greenplum/v1/cluster_service' export * as greenplum_config from './greenplum/v1/config' export * as greenplum_host from './greenplum/v1/host' export * as greenplum_maintenance from './greenplum/v1/maintenance' +export * as greenplum_resource_preset from './greenplum/v1/resource_preset' +export * as greenplum_resource_preset_service from './greenplum/v1/resource_preset_service' export * as kafka_cluster from './kafka/v1/cluster' export * as kafka_cluster_service from './kafka/v1/cluster_service' export * as kafka_common from './kafka/v1/common' @@ -97,7 +105,9 @@ export * as mongodb_mongodb3_6 from './mongodb/v1/config/mongodb3_6' export * as mongodb_mongodb4_0 from './mongodb/v1/config/mongodb4_0' export * as mongodb_mongodb4_2 from './mongodb/v1/config/mongodb4_2' export * as mongodb_mongodb4_4 from './mongodb/v1/config/mongodb4_4' +export * as mongodb_mongodb4_4_enterprise from './mongodb/v1/config/mongodb4_4_enterprise' export * as mongodb_mongodb5_0 from './mongodb/v1/config/mongodb5_0' +export * as mongodb_mongodb5_0_enterprise from './mongodb/v1/config/mongodb5_0_enterprise' export * as mysql_mysql5_7 from './mysql/v1/config/mysql5_7' export * as mysql_mysql8_0 from './mysql/v1/config/mysql8_0' export * as postgresql_host10 from './postgresql/v1/config/host10' @@ -107,6 +117,7 @@ export * as postgresql_host11_1c from './postgresql/v1/config/host11_1c' export * as postgresql_host12 from './postgresql/v1/config/host12' export * as postgresql_host12_1c from './postgresql/v1/config/host12_1c' export * as postgresql_host13 from './postgresql/v1/config/host13' +export * as postgresql_host14 from './postgresql/v1/config/host14' export * as postgresql_host9_6 from './postgresql/v1/config/host9_6' export * as postgresql_postgresql10 from './postgresql/v1/config/postgresql10' export * as postgresql_postgresql10_1c from './postgresql/v1/config/postgresql10_1c' @@ -115,6 +126,7 @@ export * as postgresql_postgresql11_1c from './postgresql/v1/config/postgresql11 export 
* as postgresql_postgresql12 from './postgresql/v1/config/postgresql12' export * as postgresql_postgresql12_1c from './postgresql/v1/config/postgresql12_1c' export * as postgresql_postgresql13 from './postgresql/v1/config/postgresql13' +export * as postgresql_postgresql14 from './postgresql/v1/config/postgresql14' export * as postgresql_postgresql9_6 from './postgresql/v1/config/postgresql9_6' export * as redis_redis5_0 from './redis/v1/config/redis5_0' export * as redis_redis6_0 from './redis/v1/config/redis6_0' diff --git a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts index 8d888eb7..acb38a43 100644 --- a/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/kafka/v1/cluster.ts @@ -269,6 +269,8 @@ export interface ConfigSpec { unmanagedTopics: boolean; /** Enables managed schema registry on cluster */ schemaRegistry: boolean; + /** Access policy for external services. */ + access?: Access; } export interface ConfigSpec_Kafka { @@ -293,7 +295,7 @@ export interface Resources { * All available presets are listed in the [documentation](/docs/managed-kafka/concepts/instance-types). */ resourcePresetId: string; - /** Volume of the storage available to a host, in bytes. */ + /** Volume of the storage available to a host, in bytes. Must be greater than 2 * partition segment size in bytes * partitions count, so each partition can have one active segment file and one closed segment file that can be deleted. */ diskSize: number; /** Type of the storage environment for the host. */ diskTypeId: string; @@ -622,6 +624,12 @@ export function host_HealthToJSON(object: Host_Health): string { } } +export interface Access { + $type: "yandex.cloud.mdb.kafka.v1.Access"; + /** Allow access for DataTransfer. 
*/ + dataTransfer: boolean; +} + const baseCluster: object = { $type: "yandex.cloud.mdb.kafka.v1.Cluster", id: "", @@ -1174,6 +1182,9 @@ export const ConfigSpec = { if (message.schemaRegistry === true) { writer.uint32(64).bool(message.schemaRegistry); } + if (message.access !== undefined) { + Access.encode(message.access, writer.uint32(74).fork()).ldelim(); + } return writer; }, @@ -1215,6 +1226,9 @@ export const ConfigSpec = { case 8: message.schemaRegistry = reader.bool(); break; + case 9: + message.access = Access.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1254,6 +1268,10 @@ export const ConfigSpec = { object.schemaRegistry !== undefined && object.schemaRegistry !== null ? Boolean(object.schemaRegistry) : false; + message.access = + object.access !== undefined && object.access !== null + ? Access.fromJSON(object.access) + : undefined; return message; }, @@ -1281,6 +1299,8 @@ export const ConfigSpec = { (obj.unmanagedTopics = message.unmanagedTopics); message.schemaRegistry !== undefined && (obj.schemaRegistry = message.schemaRegistry); + message.access !== undefined && + (obj.access = message.access ? Access.toJSON(message.access) : undefined); return obj; }, @@ -1302,6 +1322,10 @@ export const ConfigSpec = { message.assignPublicIp = object.assignPublicIp ?? false; message.unmanagedTopics = object.unmanagedTopics ?? false; message.schemaRegistry = object.schemaRegistry ?? false; + message.access = + object.access !== undefined && object.access !== null + ? 
Access.fromPartial(object.access) + : undefined; return message; }, }; @@ -2881,6 +2905,67 @@ export const Host = { messageTypeRegistry.set(Host.$type, Host); +const baseAccess: object = { + $type: "yandex.cloud.mdb.kafka.v1.Access", + dataTransfer: false, +}; + +export const Access = { + $type: "yandex.cloud.mdb.kafka.v1.Access" as const, + + encode( + message: Access, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.dataTransfer === true) { + writer.uint32(8).bool(message.dataTransfer); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Access { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseAccess } as Access; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataTransfer = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Access { + const message = { ...baseAccess } as Access; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; + return message; + }, + + toJSON(message: Access): unknown { + const obj: any = {}; + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); + return obj; + }, + + fromPartial, I>>(object: I): Access { + const message = { ...baseAccess } as Access; + message.dataTransfer = object.dataTransfer ?? 
false; + return message; + }, +}; + +messageTypeRegistry.set(Access.$type, Access); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts index 10980586..e889a4fa 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster.ts @@ -27,11 +27,21 @@ import { Mongocfgconfigset44, Mongosconfigset44, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4"; +import { + Mongodconfigset44Enterprise, + Mongocfgconfigset44Enterprise, + Mongosconfigset44Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise"; import { Mongodconfigset50, Mongocfgconfigset50, Mongosconfigset50, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0"; +import { + Mongodconfigset50Enterprise, + Mongocfgconfigset50Enterprise, + Mongosconfigset50Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Int64Value } from "../../../../../google/protobuf/wrappers"; @@ -82,7 +92,6 @@ export interface Cluster { deletionProtection: boolean; } -/** Deployment environment. */ export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** @@ -272,7 +281,7 @@ export interface Monitoring { export interface ClusterConfig { $type: "yandex.cloud.mdb.mongodb.v1.ClusterConfig"; - /** Version of MongoDB server software. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `5.0`. */ + /** Version of MongoDB server software. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`. */ version: string; /** * MongoDB feature compatibility version. See usage details in [MongoDB documentation](https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/). 
@@ -295,6 +304,10 @@ export interface ClusterConfig { mongodb44?: Mongodb44 | undefined; /** Configuration and resource allocation for a MongoDB 5.0 cluster. */ mongodb50?: Mongodb50 | undefined; + /** Configuration and resource allocation for a MongoDB 4.4 Enterprise cluster. */ + mongodb44Enterprise?: Mongodb44Enterprise | undefined; + /** Configuration and resource allocation for a MongoDB 5.0 Enterprise cluster. */ + mongodb50Enterprise?: Mongodb50Enterprise | undefined; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; /** Retain period of automatically created backup in days */ @@ -477,6 +490,50 @@ export interface Mongodb44_MongoInfra { resources?: Resources; } +export interface Mongodb44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise"; + /** Configuration and resource allocation for mongod in a MongoDB 4.4 cluster. */ + mongod?: Mongodb44Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg in a MongoDB 4.4 cluster. */ + mongocfg?: Mongodb44Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos in a MongoDB 4.4 cluster. */ + mongos?: Mongodb44Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) in a MongoDB 4.4 cluster. */ + mongoinfra?: Mongodb44Enterprise_MongoInfra; +} + +export interface Mongodb44Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongod"; + /** Configuration for mongod 4.4 hosts. */ + config?: Mongodconfigset44Enterprise; + /** Resources allocated to mongod hosts. */ + resources?: Resources; +} + +export interface Mongodb44Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoCfg"; + /** Configuration for mongocfg 4.4 hosts. */ + config?: Mongocfgconfigset44Enterprise; + /** Resources allocated to mongocfg hosts. 
*/ + resources?: Resources; +} + +export interface Mongodb44Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongos"; + /** Configuration for mongos 4.4 hosts. */ + config?: Mongosconfigset44Enterprise; + /** Resources allocated to mongos hosts. */ + resources?: Resources; +} + +export interface Mongodb44Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoInfra"; + configMongos?: Mongosconfigset44Enterprise; + configMongocfg?: Mongocfgconfigset44Enterprise; + /** Resources allocated to mongoinfra (mongos+mongocfg) hosts. */ + resources?: Resources; +} + export interface Mongodb50 { $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0"; /** Configuration and resource allocation for mongod in a MongoDB 5.0 cluster. */ @@ -521,6 +578,50 @@ export interface Mongodb50_MongoInfra { resources?: Resources; } +export interface Mongodb50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise"; + /** Configuration and resource allocation for mongod in a MongoDB 5.0 cluster. */ + mongod?: Mongodb50Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg in a MongoDB 5.0 cluster. */ + mongocfg?: Mongodb50Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos in a MongoDB 5.0 cluster. */ + mongos?: Mongodb50Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) in a MongoDB 5.0 cluster. */ + mongoinfra?: Mongodb50Enterprise_MongoInfra; +} + +export interface Mongodb50Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongod"; + /** Configuration for mongod 5.0 hosts. */ + config?: Mongodconfigset50Enterprise; + /** Resources allocated to mongod hosts. */ + resources?: Resources; +} + +export interface Mongodb50Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoCfg"; + /** Configuration for mongocfg 5.0 hosts. 
*/ + config?: Mongocfgconfigset50Enterprise; + /** Resources allocated to mongocfg hosts. */ + resources?: Resources; +} + +export interface Mongodb50Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongos"; + /** Configuration for mongos 5.0 hosts. */ + config?: Mongosconfigset50Enterprise; + /** Resources allocated to mongos hosts. */ + resources?: Resources; +} + +export interface Mongodb50Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoInfra"; + configMongos?: Mongosconfigset50Enterprise; + configMongocfg?: Mongocfgconfigset50Enterprise; + /** Resources allocated to mongoinfra (mongos+mongocfg) hosts. */ + resources?: Resources; +} + export interface Shard { $type: "yandex.cloud.mdb.mongodb.v1.Shard"; /** Name of the shard. */ @@ -535,7 +636,7 @@ export interface Host { * Name of the MongoDB host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the MongoDB host. The ID is assigned by MDB at creation time. */ @@ -822,6 +923,8 @@ export interface Access { $type: "yandex.cloud.mdb.mongodb.v1.Access"; /** Allow access for DataLens */ dataLens: boolean; + /** Allow access for DataTransfer. 
*/ + dataTransfer: boolean; } const baseCluster: object = { @@ -1360,6 +1463,18 @@ export const ClusterConfig = { if (message.mongodb50 !== undefined) { Mongodb50.encode(message.mongodb50, writer.uint32(82).fork()).ldelim(); } + if (message.mongodb44Enterprise !== undefined) { + Mongodb44Enterprise.encode( + message.mongodb44Enterprise, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.mongodb50Enterprise !== undefined) { + Mongodb50Enterprise.encode( + message.mongodb50Enterprise, + writer.uint32(98).fork() + ).ldelim(); + } if (message.backupWindowStart !== undefined) { TimeOfDay.encode( message.backupWindowStart, @@ -1409,6 +1524,18 @@ export const ClusterConfig = { case 10: message.mongodb50 = Mongodb50.decode(reader, reader.uint32()); break; + case 11: + message.mongodb44Enterprise = Mongodb44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.mongodb50Enterprise = Mongodb50Enterprise.decode( + reader, + reader.uint32() + ); + break; case 3: message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); break; @@ -1460,6 +1587,16 @@ export const ClusterConfig = { object.mongodb_5_0 !== undefined && object.mongodb_5_0 !== null ? Mongodb50.fromJSON(object.mongodb_5_0) : undefined; + message.mongodb44Enterprise = + object.mongodb_4_4_enterprise !== undefined && + object.mongodb_4_4_enterprise !== null + ? Mongodb44Enterprise.fromJSON(object.mongodb_4_4_enterprise) + : undefined; + message.mongodb50Enterprise = + object.mongodb_5_0_enterprise !== undefined && + object.mongodb_5_0_enterprise !== null + ? Mongodb50Enterprise.fromJSON(object.mongodb_5_0_enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -1502,6 +1639,14 @@ export const ClusterConfig = { (obj.mongodb_5_0 = message.mongodb50 ? 
Mongodb50.toJSON(message.mongodb50) : undefined); + message.mongodb44Enterprise !== undefined && + (obj.mongodb_4_4_enterprise = message.mongodb44Enterprise + ? Mongodb44Enterprise.toJSON(message.mongodb44Enterprise) + : undefined); + message.mongodb50Enterprise !== undefined && + (obj.mongodb_5_0_enterprise = message.mongodb50Enterprise + ? Mongodb50Enterprise.toJSON(message.mongodb50Enterprise) + : undefined); message.backupWindowStart !== undefined && (obj.backupWindowStart = message.backupWindowStart ? TimeOfDay.toJSON(message.backupWindowStart) @@ -1540,6 +1685,16 @@ export const ClusterConfig = { object.mongodb50 !== undefined && object.mongodb50 !== null ? Mongodb50.fromPartial(object.mongodb50) : undefined; + message.mongodb44Enterprise = + object.mongodb44Enterprise !== undefined && + object.mongodb44Enterprise !== null + ? Mongodb44Enterprise.fromPartial(object.mongodb44Enterprise) + : undefined; + message.mongodb50Enterprise = + object.mongodb50Enterprise !== undefined && + object.mongodb50Enterprise !== null + ? 
Mongodb50Enterprise.fromPartial(object.mongodb50Enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -3628,37 +3783,37 @@ export const Mongodb44_MongoInfra = { messageTypeRegistry.set(Mongodb44_MongoInfra.$type, Mongodb44_MongoInfra); -const baseMongodb50: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0", +const baseMongodb44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise", }; -export const Mongodb50 = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0" as const, +export const Mongodb44Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise" as const, encode( - message: Mongodb50, + message: Mongodb44Enterprise, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.mongod !== undefined) { - Mongodb50_Mongod.encode( + Mongodb44Enterprise_Mongod.encode( message.mongod, writer.uint32(10).fork() ).ldelim(); } if (message.mongocfg !== undefined) { - Mongodb50_MongoCfg.encode( + Mongodb44Enterprise_MongoCfg.encode( message.mongocfg, writer.uint32(18).fork() ).ldelim(); } if (message.mongos !== undefined) { - Mongodb50_Mongos.encode( + Mongodb44Enterprise_Mongos.encode( message.mongos, writer.uint32(26).fork() ).ldelim(); } if (message.mongoinfra !== undefined) { - Mongodb50_MongoInfra.encode( + Mongodb44Enterprise_MongoInfra.encode( message.mongoinfra, writer.uint32(34).fork() ).ldelim(); @@ -3666,24 +3821,33 @@ export const Mongodb50 = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50 { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb44Enterprise { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseMongodb50 } as Mongodb50; + const message = { ...baseMongodb44Enterprise } as Mongodb44Enterprise; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.mongod = Mongodb50_Mongod.decode(reader, reader.uint32()); + message.mongod = Mongodb44Enterprise_Mongod.decode( + reader, + reader.uint32() + ); break; case 2: - message.mongocfg = Mongodb50_MongoCfg.decode(reader, reader.uint32()); + message.mongocfg = Mongodb44Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); break; case 3: - message.mongos = Mongodb50_Mongos.decode(reader, reader.uint32()); + message.mongos = Mongodb44Enterprise_Mongos.decode( + reader, + reader.uint32() + ); break; case 4: - message.mongoinfra = Mongodb50_MongoInfra.decode( + message.mongoinfra = Mongodb44Enterprise_MongoInfra.decode( reader, reader.uint32() ); @@ -3696,87 +3860,87 @@ export const Mongodb50 = { return message; }, - fromJSON(object: any): Mongodb50 { - const message = { ...baseMongodb50 } as Mongodb50; + fromJSON(object: any): Mongodb44Enterprise { + const message = { ...baseMongodb44Enterprise } as Mongodb44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodb50_Mongod.fromJSON(object.mongod) + ? Mongodb44Enterprise_Mongod.fromJSON(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodb50_MongoCfg.fromJSON(object.mongocfg) + ? Mongodb44Enterprise_MongoCfg.fromJSON(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodb50_Mongos.fromJSON(object.mongos) + ? Mongodb44Enterprise_Mongos.fromJSON(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodb50_MongoInfra.fromJSON(object.mongoinfra) + ? 
Mongodb44Enterprise_MongoInfra.fromJSON(object.mongoinfra) : undefined; return message; }, - toJSON(message: Mongodb50): unknown { + toJSON(message: Mongodb44Enterprise): unknown { const obj: any = {}; message.mongod !== undefined && (obj.mongod = message.mongod - ? Mongodb50_Mongod.toJSON(message.mongod) + ? Mongodb44Enterprise_Mongod.toJSON(message.mongod) : undefined); message.mongocfg !== undefined && (obj.mongocfg = message.mongocfg - ? Mongodb50_MongoCfg.toJSON(message.mongocfg) + ? Mongodb44Enterprise_MongoCfg.toJSON(message.mongocfg) : undefined); message.mongos !== undefined && (obj.mongos = message.mongos - ? Mongodb50_Mongos.toJSON(message.mongos) + ? Mongodb44Enterprise_Mongos.toJSON(message.mongos) : undefined); message.mongoinfra !== undefined && (obj.mongoinfra = message.mongoinfra - ? Mongodb50_MongoInfra.toJSON(message.mongoinfra) + ? Mongodb44Enterprise_MongoInfra.toJSON(message.mongoinfra) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50 { - const message = { ...baseMongodb50 } as Mongodb50; + ): Mongodb44Enterprise { + const message = { ...baseMongodb44Enterprise } as Mongodb44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodb50_Mongod.fromPartial(object.mongod) + ? Mongodb44Enterprise_Mongod.fromPartial(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodb50_MongoCfg.fromPartial(object.mongocfg) + ? Mongodb44Enterprise_MongoCfg.fromPartial(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodb50_Mongos.fromPartial(object.mongos) + ? Mongodb44Enterprise_Mongos.fromPartial(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodb50_MongoInfra.fromPartial(object.mongoinfra) + ? 
Mongodb44Enterprise_MongoInfra.fromPartial(object.mongoinfra) : undefined; return message; }, }; -messageTypeRegistry.set(Mongodb50.$type, Mongodb50); +messageTypeRegistry.set(Mongodb44Enterprise.$type, Mongodb44Enterprise); -const baseMongodb50_Mongod: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod", +const baseMongodb44Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongod", }; -export const Mongodb50_Mongod = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod" as const, +export const Mongodb44Enterprise_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongod" as const, encode( - message: Mongodb50_Mongod, + message: Mongodb44Enterprise_Mongod, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongodconfigset50.encode( + Mongodconfigset44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -3787,15 +3951,23 @@ export const Mongodb50_Mongod = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongod { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb44Enterprise_Mongod { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + const message = { + ...baseMongodb44Enterprise_Mongod, + } as Mongodb44Enterprise_Mongod; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongodconfigset50.decode(reader, reader.uint32()); + message.config = Mongodconfigset44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -3808,11 +3980,13 @@ export const Mongodb50_Mongod = { return message; }, - fromJSON(object: any): Mongodb50_Mongod { - const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + fromJSON(object: any): Mongodb44Enterprise_Mongod { + const message = { + ...baseMongodb44Enterprise_Mongod, + } as Mongodb44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? Mongodconfigset50.fromJSON(object.config) + ? Mongodconfigset44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3821,11 +3995,11 @@ export const Mongodb50_Mongod = { return message; }, - toJSON(message: Mongodb50_Mongod): unknown { + toJSON(message: Mongodb44Enterprise_Mongod): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongodconfigset50.toJSON(message.config) + ? Mongodconfigset44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -3834,13 +4008,15 @@ export const Mongodb50_Mongod = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_Mongod { - const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + ): Mongodb44Enterprise_Mongod { + const message = { + ...baseMongodb44Enterprise_Mongod, + } as Mongodb44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? 
Mongodconfigset50.fromPartial(object.config) + ? Mongodconfigset44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3850,21 +4026,24 @@ export const Mongodb50_Mongod = { }, }; -messageTypeRegistry.set(Mongodb50_Mongod.$type, Mongodb50_Mongod); +messageTypeRegistry.set( + Mongodb44Enterprise_Mongod.$type, + Mongodb44Enterprise_Mongod +); -const baseMongodb50_MongoCfg: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg", +const baseMongodb44Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoCfg", }; -export const Mongodb50_MongoCfg = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg" as const, +export const Mongodb44Enterprise_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoCfg" as const, encode( - message: Mongodb50_MongoCfg, + message: Mongodb44Enterprise_MongoCfg, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongocfgconfigset50.encode( + Mongocfgconfigset44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -3875,15 +4054,23 @@ export const Mongodb50_MongoCfg = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_MongoCfg { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb44Enterprise_MongoCfg { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + const message = { + ...baseMongodb44Enterprise_MongoCfg, + } as Mongodb44Enterprise_MongoCfg; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongocfgconfigset50.decode(reader, reader.uint32()); + message.config = Mongocfgconfigset44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -3896,11 +4083,13 @@ export const Mongodb50_MongoCfg = { return message; }, - fromJSON(object: any): Mongodb50_MongoCfg { - const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + fromJSON(object: any): Mongodb44Enterprise_MongoCfg { + const message = { + ...baseMongodb44Enterprise_MongoCfg, + } as Mongodb44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfigset50.fromJSON(object.config) + ? Mongocfgconfigset44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3909,11 +4098,11 @@ export const Mongodb50_MongoCfg = { return message; }, - toJSON(message: Mongodb50_MongoCfg): unknown { + toJSON(message: Mongodb44Enterprise_MongoCfg): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongocfgconfigset50.toJSON(message.config) + ? 
Mongocfgconfigset44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -3922,13 +4111,15 @@ export const Mongodb50_MongoCfg = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_MongoCfg { - const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + ): Mongodb44Enterprise_MongoCfg { + const message = { + ...baseMongodb44Enterprise_MongoCfg, + } as Mongodb44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfigset50.fromPartial(object.config) + ? Mongocfgconfigset44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3938,21 +4129,24 @@ export const Mongodb50_MongoCfg = { }, }; -messageTypeRegistry.set(Mongodb50_MongoCfg.$type, Mongodb50_MongoCfg); +messageTypeRegistry.set( + Mongodb44Enterprise_MongoCfg.$type, + Mongodb44Enterprise_MongoCfg +); -const baseMongodb50_Mongos: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos", +const baseMongodb44Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongos", }; -export const Mongodb50_Mongos = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos" as const, +export const Mongodb44Enterprise_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.Mongos" as const, encode( - message: Mongodb50_Mongos, + message: Mongodb44Enterprise_Mongos, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongosconfigset50.encode( + Mongosconfigset44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -3963,15 +4157,23 @@ export const Mongodb50_Mongos = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongos { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb44Enterprise_Mongos { const reader 
= input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + const message = { + ...baseMongodb44Enterprise_Mongos, + } as Mongodb44Enterprise_Mongos; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongosconfigset50.decode(reader, reader.uint32()); + message.config = Mongosconfigset44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -3984,11 +4186,13 @@ export const Mongodb50_Mongos = { return message; }, - fromJSON(object: any): Mongodb50_Mongos { - const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + fromJSON(object: any): Mongodb44Enterprise_Mongos { + const message = { + ...baseMongodb44Enterprise_Mongos, + } as Mongodb44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfigset50.fromJSON(object.config) + ? Mongosconfigset44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -3997,11 +4201,11 @@ export const Mongodb50_Mongos = { return message; }, - toJSON(message: Mongodb50_Mongos): unknown { + toJSON(message: Mongodb44Enterprise_Mongos): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongosconfigset50.toJSON(message.config) + ? 
Mongosconfigset44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -4010,13 +4214,15 @@ export const Mongodb50_Mongos = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_Mongos { - const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + ): Mongodb44Enterprise_Mongos { + const message = { + ...baseMongodb44Enterprise_Mongos, + } as Mongodb44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfigset50.fromPartial(object.config) + ? Mongosconfigset44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -4026,27 +4232,31 @@ export const Mongodb50_Mongos = { }, }; -messageTypeRegistry.set(Mongodb50_Mongos.$type, Mongodb50_Mongos); +messageTypeRegistry.set( + Mongodb44Enterprise_Mongos.$type, + Mongodb44Enterprise_Mongos +); -const baseMongodb50_MongoInfra: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra", +const baseMongodb44Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoInfra", }; -export const Mongodb50_MongoInfra = { - $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra" as const, +export const Mongodb44Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.Mongodb4_4_enterprise.MongoInfra" as const, encode( - message: Mongodb50_MongoInfra, + message: Mongodb44Enterprise_MongoInfra, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.configMongos !== undefined) { - Mongosconfigset50.encode( + Mongosconfigset44Enterprise.encode( message.configMongos, writer.uint32(10).fork() ).ldelim(); } if (message.configMongocfg !== undefined) { - Mongocfgconfigset50.encode( + Mongocfgconfigset44Enterprise.encode( message.configMongocfg, writer.uint32(18).fork() ).ldelim(); @@ -4060,21 +4270,23 @@ export const Mongodb50_MongoInfra = { 
decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodb50_MongoInfra { + ): Mongodb44Enterprise_MongoInfra { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + const message = { + ...baseMongodb44Enterprise_MongoInfra, + } as Mongodb44Enterprise_MongoInfra; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.configMongos = Mongosconfigset50.decode( + message.configMongos = Mongosconfigset44Enterprise.decode( reader, reader.uint32() ); break; case 2: - message.configMongocfg = Mongocfgconfigset50.decode( + message.configMongocfg = Mongocfgconfigset44Enterprise.decode( reader, reader.uint32() ); @@ -4090,15 +4302,17 @@ export const Mongodb50_MongoInfra = { return message; }, - fromJSON(object: any): Mongodb50_MongoInfra { - const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + fromJSON(object: any): Mongodb44Enterprise_MongoInfra { + const message = { + ...baseMongodb44Enterprise_MongoInfra, + } as Mongodb44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfigset50.fromJSON(object.configMongos) + ? Mongosconfigset44Enterprise.fromJSON(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfigset50.fromJSON(object.configMongocfg) + ? 
Mongocfgconfigset44Enterprise.fromJSON(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -4107,15 +4321,15 @@ export const Mongodb50_MongoInfra = { return message; }, - toJSON(message: Mongodb50_MongoInfra): unknown { + toJSON(message: Mongodb44Enterprise_MongoInfra): unknown { const obj: any = {}; message.configMongos !== undefined && (obj.configMongos = message.configMongos - ? Mongosconfigset50.toJSON(message.configMongos) + ? Mongosconfigset44Enterprise.toJSON(message.configMongos) : undefined); message.configMongocfg !== undefined && (obj.configMongocfg = message.configMongocfg - ? Mongocfgconfigset50.toJSON(message.configMongocfg) + ? Mongocfgconfigset44Enterprise.toJSON(message.configMongocfg) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -4124,17 +4338,19 @@ export const Mongodb50_MongoInfra = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodb50_MongoInfra { - const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + ): Mongodb44Enterprise_MongoInfra { + const message = { + ...baseMongodb44Enterprise_MongoInfra, + } as Mongodb44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfigset50.fromPartial(object.configMongos) + ? Mongosconfigset44Enterprise.fromPartial(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfigset50.fromPartial(object.configMongocfg) + ? 
Mongocfgconfigset44Enterprise.fromPartial(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -4144,28 +4360,1131 @@ export const Mongodb50_MongoInfra = { }, }; -messageTypeRegistry.set(Mongodb50_MongoInfra.$type, Mongodb50_MongoInfra); +messageTypeRegistry.set( + Mongodb44Enterprise_MongoInfra.$type, + Mongodb44Enterprise_MongoInfra +); -const baseShard: object = { - $type: "yandex.cloud.mdb.mongodb.v1.Shard", - name: "", - clusterId: "", +const baseMongodb50: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0", }; -export const Shard = { - $type: "yandex.cloud.mdb.mongodb.v1.Shard" as const, +export const Mongodb50 = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0" as const, - encode(message: Shard, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); + encode( + message: Mongodb50, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodb50_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); } - if (message.clusterId !== "") { - writer.uint32(18).string(message.clusterId); + if (message.mongocfg !== undefined) { + Mongodb50_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodb50_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodb50_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Shard { + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb50 } as Mongodb50; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodb50_Mongod.decode(reader, reader.uint32()); + break; + case 2: + message.mongocfg = Mongodb50_MongoCfg.decode(reader, reader.uint32()); + break; + case 3: + message.mongos = Mongodb50_Mongos.decode(reader, reader.uint32()); + break; + case 4: + message.mongoinfra = Mongodb50_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50 { + const message = { ...baseMongodb50 } as Mongodb50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodb50): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodb50_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodb50_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodb50_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? 
Mongodb50_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50 { + const message = { ...baseMongodb50 } as Mongodb50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50.$type, Mongodb50); + +const baseMongodb50_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod", +}; + +export const Mongodb50_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongod" as const, + + encode( + message: Mongodb50_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfigset50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfigset50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_Mongod { + const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfigset50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_Mongod { + const message = { ...baseMongodb50_Mongod } as Mongodb50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_Mongod.$type, Mongodb50_Mongod); + +const baseMongodb50_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg", +}; + +export const Mongodb50_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoCfg" as const, + + encode( + message: Mongodb50_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfigset50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfigset50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_MongoCfg { + const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? 
Mongocfgconfigset50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_MongoCfg { + const message = { ...baseMongodb50_MongoCfg } as Mongodb50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_MongoCfg.$type, Mongodb50_MongoCfg); + +const baseMongodb50_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos", +}; + +export const Mongodb50_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.Mongos" as const, + + encode( + message: Mongodb50_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfigset50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfigset50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_Mongos { + const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfigset50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_Mongos { + const message = { ...baseMongodb50_Mongos } as Mongodb50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_Mongos.$type, Mongodb50_Mongos); + +const baseMongodb50_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra", +}; + +export const Mongodb50_MongoInfra = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0.MongoInfra" as const, + + encode( + message: Mongodb50_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfigset50.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfigset50.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfigset50.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfigset50.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50_MongoInfra { + const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? 
Mongosconfigset50.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfigset50.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfigset50.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50_MongoInfra { + const message = { ...baseMongodb50_MongoInfra } as Mongodb50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50_MongoInfra.$type, Mongodb50_MongoInfra); + +const baseMongodb50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise", +}; + +export const Mongodb50Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise" as const, + + encode( + message: Mongodb50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodb50Enterprise_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.mongocfg !== undefined) { + Mongodb50Enterprise_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodb50Enterprise_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodb50Enterprise_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodb50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodb50Enterprise } as Mongodb50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodb50Enterprise_Mongod.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.mongocfg = Mongodb50Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodb50Enterprise_Mongos.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.mongoinfra = Mongodb50Enterprise_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise { + const message = { ...baseMongodb50Enterprise } as Mongodb50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50Enterprise_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50Enterprise_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50Enterprise_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50Enterprise_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodb50Enterprise_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodb50Enterprise_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? 
Mongodb50Enterprise_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodb50Enterprise_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise { + const message = { ...baseMongodb50Enterprise } as Mongodb50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodb50Enterprise_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodb50Enterprise_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodb50Enterprise_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodb50Enterprise_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodb50Enterprise.$type, Mongodb50Enterprise); + +const baseMongodb50Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongod", +}; + +export const Mongodb50Enterprise_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongod" as const, + + encode( + message: Mongodb50Enterprise_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfigset50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_Mongod, + } as Mongodb50Enterprise_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_Mongod { + const message = { + ...baseMongodb50Enterprise_Mongod, + } as Mongodb50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfigset50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_Mongod { + const message = { + ...baseMongodb50Enterprise_Mongod, + } as Mongodb50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfigset50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_Mongod.$type, + Mongodb50Enterprise_Mongod +); + +const baseMongodb50Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoCfg", +}; + +export const Mongodb50Enterprise_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoCfg" as const, + + encode( + message: Mongodb50Enterprise_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfigset50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_MongoCfg, + } as Mongodb50Enterprise_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_MongoCfg { + const message = { + ...baseMongodb50Enterprise_MongoCfg, + } as Mongodb50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfigset50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_MongoCfg { + const message = { + ...baseMongodb50Enterprise_MongoCfg, + } as Mongodb50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfigset50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_MongoCfg.$type, + Mongodb50Enterprise_MongoCfg +); + +const baseMongodb50Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongos", +}; + +export const Mongodb50Enterprise_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.Mongos" as const, + + encode( + message: Mongodb50Enterprise_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfigset50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_Mongos, + } as Mongodb50Enterprise_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_Mongos { + const message = { + ...baseMongodb50Enterprise_Mongos, + } as Mongodb50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfigset50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_Mongos { + const message = { + ...baseMongodb50Enterprise_Mongos, + } as Mongodb50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfigset50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_Mongos.$type, + Mongodb50Enterprise_Mongos +); + +const baseMongodb50Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoInfra", +}; + +export const Mongodb50Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.Mongodb5_0_enterprise.MongoInfra" as const, + + encode( + message: Mongodb50Enterprise_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfigset50Enterprise.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfigset50Enterprise.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodb50Enterprise_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodb50Enterprise_MongoInfra, + } as Mongodb50Enterprise_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfigset50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodb50Enterprise_MongoInfra { + const message = { + ...baseMongodb50Enterprise_MongoInfra, + } as Mongodb50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50Enterprise.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50Enterprise.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodb50Enterprise_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfigset50Enterprise.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfigset50Enterprise.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? 
Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodb50Enterprise_MongoInfra { + const message = { + ...baseMongodb50Enterprise_MongoInfra, + } as Mongodb50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfigset50Enterprise.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfigset50Enterprise.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodb50Enterprise_MongoInfra.$type, + Mongodb50Enterprise_MongoInfra +); + +const baseShard: object = { + $type: "yandex.cloud.mdb.mongodb.v1.Shard", + name: "", + clusterId: "", +}; + +export const Shard = { + $type: "yandex.cloud.mdb.mongodb.v1.Shard" as const, + + encode(message: Shard, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.clusterId !== "") { + writer.uint32(18).string(message.clusterId); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Shard { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseShard } as Shard; @@ -4583,6 +5902,7 @@ messageTypeRegistry.set(Resources.$type, Resources); const baseAccess: object = { $type: "yandex.cloud.mdb.mongodb.v1.Access", dataLens: false, + dataTransfer: false, }; export const Access = { @@ -4595,6 +5915,9 @@ export const Access = { if (message.dataLens === true) { writer.uint32(8).bool(message.dataLens); } + if (message.dataTransfer === true) { + writer.uint32(24).bool(message.dataTransfer); + } return writer; }, @@ -4608,6 +5931,9 @@ export const Access = { case 1: message.dataLens = reader.bool(); break; + case 3: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -4622,18 +5948,25 @@ export const Access = { object.dataLens !== undefined && object.dataLens !== null ? Boolean(object.dataLens) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, toJSON(message: Access): unknown { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, fromPartial, I>>(object: I): Access { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; + message.dataTransfer = object.dataTransfer ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts index 7382a19c..ee25b5a5 100644 --- a/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/cluster_service.ts @@ -51,11 +51,21 @@ import { Mongocfgconfig44, Mongosconfig44, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4"; +import { + Mongodconfig44Enterprise, + Mongocfgconfig44Enterprise, + Mongosconfig44Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise"; import { Mongodconfig50, Mongocfgconfig50, Mongosconfig50, } from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0"; +import { + Mongodconfig50Enterprise, + Mongocfgconfig50Enterprise, + Mongosconfig50Enterprise, +} from "../../../../../yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise"; import { TimeOfDay } from "../../../../../google/type/timeofday"; import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { DatabaseSpec } from "../../../../../yandex/cloud/mdb/mongodb/v1/database"; @@ -440,6 +450,8 @@ export enum ListClusterLogsRequest_ServiceType { MONGOD = 1, MONGOS = 2, MONGOCFG = 3, + /** AUDIT - MongoDB Enterprise audit logs */ + AUDIT = 4, UNRECOGNIZED = -1, } @@ -459,6 +471,9 @@ export function listClusterLogsRequest_ServiceTypeFromJSON( case 3: case "MONGOCFG": return ListClusterLogsRequest_ServiceType.MONGOCFG; + case 4: + case "AUDIT": + return ListClusterLogsRequest_ServiceType.AUDIT; case -1: case "UNRECOGNIZED": default: @@ -478,6 +493,8 @@ export function listClusterLogsRequest_ServiceTypeToJSON( return "MONGOS"; case ListClusterLogsRequest_ServiceType.MONGOCFG: return "MONGOCFG"; + case ListClusterLogsRequest_ServiceType.AUDIT: + return "AUDIT"; default: return "UNKNOWN"; } @@ -547,6 +564,8 @@ export enum StreamClusterLogsRequest_ServiceType { MONGOD = 1, MONGOS = 2, MONGOCFG 
= 3, + /** AUDIT - MongoDB Enterprise audit logs */ + AUDIT = 4, UNRECOGNIZED = -1, } @@ -566,6 +585,9 @@ export function streamClusterLogsRequest_ServiceTypeFromJSON( case 3: case "MONGOCFG": return StreamClusterLogsRequest_ServiceType.MONGOCFG; + case 4: + case "AUDIT": + return StreamClusterLogsRequest_ServiceType.AUDIT; case -1: case "UNRECOGNIZED": default: @@ -585,6 +607,8 @@ export function streamClusterLogsRequest_ServiceTypeToJSON( return "MONGOS"; case StreamClusterLogsRequest_ServiceType.MONGOCFG: return "MONGOCFG"; + case StreamClusterLogsRequest_ServiceType.AUDIT: + return "AUDIT"; default: return "UNKNOWN"; } @@ -1095,6 +1119,51 @@ export interface Mongodbspec44_MongoInfra { resources?: Resources; } +export interface Mongodbspec44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise"; + /** Configuration and resource allocation for mongod 4.4 hosts. */ + mongod?: Mongodbspec44Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg 4.4 hosts. */ + mongocfg?: Mongodbspec44Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos 4.4 hosts. */ + mongos?: Mongodbspec44Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) 4.4 hosts. */ + mongoinfra?: Mongodbspec44Enterprise_MongoInfra; +} + +export interface Mongodbspec44Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongod"; + /** Configuration for mongod 4.4 hosts. */ + config?: Mongodconfig44Enterprise; + /** Resources allocated to each mongod host. */ + resources?: Resources; +} + +export interface Mongodbspec44Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoCfg"; + /** Configuration for mongocfg 4.4 hosts. */ + config?: Mongocfgconfig44Enterprise; + /** Resources allocated to each mongocfg host. 
*/ + resources?: Resources; +} + +export interface Mongodbspec44Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongos"; + /** Configuration for mongos 4.4 hosts. */ + config?: Mongosconfig44Enterprise; + /** Resources allocated to each mongos host. */ + resources?: Resources; +} + +export interface Mongodbspec44Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoInfra"; + /** Configuration for mongoinfra 4.4 hosts. */ + configMongos?: Mongosconfig44Enterprise; + configMongocfg?: Mongocfgconfig44Enterprise; + /** Resources allocated to each mongoinfra (mongos+mongocfg) host. */ + resources?: Resources; +} + export interface Mongodbspec50 { $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0"; /** Configuration and resource allocation for mongod 5.0 hosts. */ @@ -1140,9 +1209,54 @@ export interface Mongodbspec50_MongoInfra { resources?: Resources; } +export interface Mongodbspec50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise"; + /** Configuration and resource allocation for mongod 5.0 hosts. */ + mongod?: Mongodbspec50Enterprise_Mongod; + /** Configuration and resource allocation for mongocfg 5.0 hosts. */ + mongocfg?: Mongodbspec50Enterprise_MongoCfg; + /** Configuration and resource allocation for mongos 5.0 hosts. */ + mongos?: Mongodbspec50Enterprise_Mongos; + /** Configuration and resource allocation for mongoinfra (mongos+mongocfg) 5.0 hosts. */ + mongoinfra?: Mongodbspec50Enterprise_MongoInfra; +} + +export interface Mongodbspec50Enterprise_Mongod { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongod"; + /** Configuration for mongod 5.0 hosts. */ + config?: Mongodconfig50Enterprise; + /** Resources allocated to each mongod host. */ + resources?: Resources; +} + +export interface Mongodbspec50Enterprise_MongoCfg { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoCfg"; + /** Configuration for mongocfg 5.0 hosts. 
*/ + config?: Mongocfgconfig50Enterprise; + /** Resources allocated to each mongocfg host. */ + resources?: Resources; +} + +export interface Mongodbspec50Enterprise_Mongos { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongos"; + /** Configuration for mongos 5.0 hosts. */ + config?: Mongosconfig50Enterprise; + /** Resources allocated to each mongos host. */ + resources?: Resources; +} + +export interface Mongodbspec50Enterprise_MongoInfra { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoInfra"; + /** Configuration for mongoinfra 5.0 hosts. */ + configMongos?: Mongosconfig50Enterprise; + configMongocfg?: Mongocfgconfig50Enterprise; + /** Resources allocated to each mongoinfra (mongos+mongocfg) host. */ + resources?: Resources; +} + export interface ConfigSpec { $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec"; - /** Version of MongoDB used in the cluster. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `5.0`. */ + /** Version of MongoDB used in the cluster. Possible values: `3.6`, `4.0`, `4.2`, `4.4`, `4.4-enterprise`, `5.0`, `5.0-enterprise`. */ version: string; /** * MongoDB feature compatibility version. See usage details in [MongoDB documentation](https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/). @@ -1165,6 +1279,10 @@ export interface ConfigSpec { mongodbSpec44?: Mongodbspec44 | undefined; /** Configuration and resource allocation for a MongoDB 5.0 cluster. */ mongodbSpec50?: Mongodbspec50 | undefined; + /** Configuration and resource allocation for a MongoDB 4.4 Enterprise cluster. */ + mongodbSpec44Enterprise?: Mongodbspec44Enterprise | undefined; + /** Configuration and resource allocation for a MongoDB 5.0 Enterprise cluster. */ + mongodbSpec50Enterprise?: Mongodbspec50Enterprise | undefined; /** Time to start the daily backup, in the UTC timezone. 
*/ backupWindowStart?: TimeOfDay; /** Retain period of automatically created backup in days */ @@ -9008,37 +9126,37 @@ messageTypeRegistry.set( Mongodbspec44_MongoInfra ); -const baseMongodbspec50: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0", +const baseMongodbspec44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise", }; -export const Mongodbspec50 = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0" as const, +export const Mongodbspec44Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise" as const, encode( - message: Mongodbspec50, + message: Mongodbspec44Enterprise, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.mongod !== undefined) { - Mongodbspec50_Mongod.encode( + Mongodbspec44Enterprise_Mongod.encode( message.mongod, writer.uint32(10).fork() ).ldelim(); } if (message.mongocfg !== undefined) { - Mongodbspec50_MongoCfg.encode( + Mongodbspec44Enterprise_MongoCfg.encode( message.mongocfg, writer.uint32(18).fork() ).ldelim(); } if (message.mongos !== undefined) { - Mongodbspec50_Mongos.encode( + Mongodbspec44Enterprise_Mongos.encode( message.mongos, writer.uint32(26).fork() ).ldelim(); } if (message.mongoinfra !== undefined) { - Mongodbspec50_MongoInfra.encode( + Mongodbspec44Enterprise_MongoInfra.encode( message.mongoinfra, writer.uint32(34).fork() ).ldelim(); @@ -9046,27 +9164,38 @@ export const Mongodbspec50 = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Mongodbspec50 { + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec44Enterprise { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseMongodbspec50 } as Mongodbspec50; + const message = { + ...baseMongodbspec44Enterprise, + } as Mongodbspec44Enterprise; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.mongod = Mongodbspec50_Mongod.decode(reader, reader.uint32()); + message.mongod = Mongodbspec44Enterprise_Mongod.decode( + reader, + reader.uint32() + ); break; case 2: - message.mongocfg = Mongodbspec50_MongoCfg.decode( + message.mongocfg = Mongodbspec44Enterprise_MongoCfg.decode( reader, reader.uint32() ); break; case 3: - message.mongos = Mongodbspec50_Mongos.decode(reader, reader.uint32()); + message.mongos = Mongodbspec44Enterprise_Mongos.decode( + reader, + reader.uint32() + ); break; case 4: - message.mongoinfra = Mongodbspec50_MongoInfra.decode( + message.mongoinfra = Mongodbspec44Enterprise_MongoInfra.decode( reader, reader.uint32() ); @@ -9079,87 +9208,95 @@ export const Mongodbspec50 = { return message; }, - fromJSON(object: any): Mongodbspec50 { - const message = { ...baseMongodbspec50 } as Mongodbspec50; + fromJSON(object: any): Mongodbspec44Enterprise { + const message = { + ...baseMongodbspec44Enterprise, + } as Mongodbspec44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodbspec50_Mongod.fromJSON(object.mongod) + ? Mongodbspec44Enterprise_Mongod.fromJSON(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodbspec50_MongoCfg.fromJSON(object.mongocfg) + ? Mongodbspec44Enterprise_MongoCfg.fromJSON(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodbspec50_Mongos.fromJSON(object.mongos) + ? Mongodbspec44Enterprise_Mongos.fromJSON(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodbspec50_MongoInfra.fromJSON(object.mongoinfra) + ? 
Mongodbspec44Enterprise_MongoInfra.fromJSON(object.mongoinfra) : undefined; return message; }, - toJSON(message: Mongodbspec50): unknown { + toJSON(message: Mongodbspec44Enterprise): unknown { const obj: any = {}; message.mongod !== undefined && (obj.mongod = message.mongod - ? Mongodbspec50_Mongod.toJSON(message.mongod) + ? Mongodbspec44Enterprise_Mongod.toJSON(message.mongod) : undefined); message.mongocfg !== undefined && (obj.mongocfg = message.mongocfg - ? Mongodbspec50_MongoCfg.toJSON(message.mongocfg) + ? Mongodbspec44Enterprise_MongoCfg.toJSON(message.mongocfg) : undefined); message.mongos !== undefined && (obj.mongos = message.mongos - ? Mongodbspec50_Mongos.toJSON(message.mongos) + ? Mongodbspec44Enterprise_Mongos.toJSON(message.mongos) : undefined); message.mongoinfra !== undefined && (obj.mongoinfra = message.mongoinfra - ? Mongodbspec50_MongoInfra.toJSON(message.mongoinfra) + ? Mongodbspec44Enterprise_MongoInfra.toJSON(message.mongoinfra) : undefined); return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodbspec50 { - const message = { ...baseMongodbspec50 } as Mongodbspec50; + ): Mongodbspec44Enterprise { + const message = { + ...baseMongodbspec44Enterprise, + } as Mongodbspec44Enterprise; message.mongod = object.mongod !== undefined && object.mongod !== null - ? Mongodbspec50_Mongod.fromPartial(object.mongod) + ? Mongodbspec44Enterprise_Mongod.fromPartial(object.mongod) : undefined; message.mongocfg = object.mongocfg !== undefined && object.mongocfg !== null - ? Mongodbspec50_MongoCfg.fromPartial(object.mongocfg) + ? Mongodbspec44Enterprise_MongoCfg.fromPartial(object.mongocfg) : undefined; message.mongos = object.mongos !== undefined && object.mongos !== null - ? Mongodbspec50_Mongos.fromPartial(object.mongos) + ? Mongodbspec44Enterprise_Mongos.fromPartial(object.mongos) : undefined; message.mongoinfra = object.mongoinfra !== undefined && object.mongoinfra !== null - ? Mongodbspec50_MongoInfra.fromPartial(object.mongoinfra) + ? 
Mongodbspec44Enterprise_MongoInfra.fromPartial(object.mongoinfra) : undefined; return message; }, }; -messageTypeRegistry.set(Mongodbspec50.$type, Mongodbspec50); +messageTypeRegistry.set(Mongodbspec44Enterprise.$type, Mongodbspec44Enterprise); -const baseMongodbspec50_Mongod: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod", +const baseMongodbspec44Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongod", }; -export const Mongodbspec50_Mongod = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod" as const, +export const Mongodbspec44Enterprise_Mongod = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongod" as const, encode( - message: Mongodbspec50_Mongod, + message: Mongodbspec44Enterprise_Mongod, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongodconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + Mongodconfig44Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); } if (message.resources !== undefined) { Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); @@ -9170,15 +9307,20 @@ export const Mongodbspec50_Mongod = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_Mongod { + ): Mongodbspec44Enterprise_Mongod { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + const message = { + ...baseMongodbspec44Enterprise_Mongod, + } as Mongodbspec44Enterprise_Mongod; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongodconfig50.decode(reader, reader.uint32()); + message.config = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -9191,11 +9333,13 @@ export const Mongodbspec50_Mongod = { return message; }, - fromJSON(object: any): Mongodbspec50_Mongod { - const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + fromJSON(object: any): Mongodbspec44Enterprise_Mongod { + const message = { + ...baseMongodbspec44Enterprise_Mongod, + } as Mongodbspec44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? Mongodconfig50.fromJSON(object.config) + ? Mongodconfig44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9204,11 +9348,11 @@ export const Mongodbspec50_Mongod = { return message; }, - toJSON(message: Mongodbspec50_Mongod): unknown { + toJSON(message: Mongodbspec44Enterprise_Mongod): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongodconfig50.toJSON(message.config) + ? 
Mongodconfig44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9217,13 +9361,15 @@ export const Mongodbspec50_Mongod = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodbspec50_Mongod { - const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + ): Mongodbspec44Enterprise_Mongod { + const message = { + ...baseMongodbspec44Enterprise_Mongod, + } as Mongodbspec44Enterprise_Mongod; message.config = object.config !== undefined && object.config !== null - ? Mongodconfig50.fromPartial(object.config) + ? Mongodconfig44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9233,21 +9379,25 @@ export const Mongodbspec50_Mongod = { }, }; -messageTypeRegistry.set(Mongodbspec50_Mongod.$type, Mongodbspec50_Mongod); +messageTypeRegistry.set( + Mongodbspec44Enterprise_Mongod.$type, + Mongodbspec44Enterprise_Mongod +); -const baseMongodbspec50_MongoCfg: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg", +const baseMongodbspec44Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoCfg", }; -export const Mongodbspec50_MongoCfg = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg" as const, +export const Mongodbspec44Enterprise_MongoCfg = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoCfg" as const, encode( - message: Mongodbspec50_MongoCfg, + message: Mongodbspec44Enterprise_MongoCfg, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongocfgconfig50.encode( + Mongocfgconfig44Enterprise.encode( message.config, writer.uint32(10).fork() ).ldelim(); @@ -9261,15 +9411,20 @@ export const Mongodbspec50_MongoCfg = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_MongoCfg { + ): Mongodbspec44Enterprise_MongoCfg { 
const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + const message = { + ...baseMongodbspec44Enterprise_MongoCfg, + } as Mongodbspec44Enterprise_MongoCfg; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongocfgconfig50.decode(reader, reader.uint32()); + message.config = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -9282,11 +9437,13 @@ export const Mongodbspec50_MongoCfg = { return message; }, - fromJSON(object: any): Mongodbspec50_MongoCfg { - const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + fromJSON(object: any): Mongodbspec44Enterprise_MongoCfg { + const message = { + ...baseMongodbspec44Enterprise_MongoCfg, + } as Mongodbspec44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfig50.fromJSON(object.config) + ? Mongocfgconfig44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9295,11 +9452,11 @@ export const Mongodbspec50_MongoCfg = { return message; }, - toJSON(message: Mongodbspec50_MongoCfg): unknown { + toJSON(message: Mongodbspec44Enterprise_MongoCfg): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongocfgconfig50.toJSON(message.config) + ? 
Mongocfgconfig44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9308,13 +9465,15 @@ export const Mongodbspec50_MongoCfg = { return obj; }, - fromPartial, I>>( - object: I - ): Mongodbspec50_MongoCfg { - const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec44Enterprise_MongoCfg { + const message = { + ...baseMongodbspec44Enterprise_MongoCfg, + } as Mongodbspec44Enterprise_MongoCfg; message.config = object.config !== undefined && object.config !== null - ? Mongocfgconfig50.fromPartial(object.config) + ? Mongocfgconfig44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9324,21 +9483,28 @@ export const Mongodbspec50_MongoCfg = { }, }; -messageTypeRegistry.set(Mongodbspec50_MongoCfg.$type, Mongodbspec50_MongoCfg); +messageTypeRegistry.set( + Mongodbspec44Enterprise_MongoCfg.$type, + Mongodbspec44Enterprise_MongoCfg +); -const baseMongodbspec50_Mongos: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos", +const baseMongodbspec44Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongos", }; -export const Mongodbspec50_Mongos = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos" as const, +export const Mongodbspec44Enterprise_Mongos = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.Mongos" as const, encode( - message: Mongodbspec50_Mongos, + message: Mongodbspec44Enterprise_Mongos, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.config !== undefined) { - Mongosconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + Mongosconfig44Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); } if (message.resources !== undefined) { Resources.encode(message.resources, 
writer.uint32(18).fork()).ldelim(); @@ -9349,15 +9515,20 @@ export const Mongodbspec50_Mongos = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_Mongos { + ): Mongodbspec44Enterprise_Mongos { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; - const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + const message = { + ...baseMongodbspec44Enterprise_Mongos, + } as Mongodbspec44Enterprise_Mongos; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.config = Mongosconfig50.decode(reader, reader.uint32()); + message.config = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: message.resources = Resources.decode(reader, reader.uint32()); @@ -9370,11 +9541,13 @@ export const Mongodbspec50_Mongos = { return message; }, - fromJSON(object: any): Mongodbspec50_Mongos { - const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + fromJSON(object: any): Mongodbspec44Enterprise_Mongos { + const message = { + ...baseMongodbspec44Enterprise_Mongos, + } as Mongodbspec44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfig50.fromJSON(object.config) + ? Mongosconfig44Enterprise.fromJSON(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9383,11 +9556,11 @@ export const Mongodbspec50_Mongos = { return message; }, - toJSON(message: Mongodbspec50_Mongos): unknown { + toJSON(message: Mongodbspec44Enterprise_Mongos): unknown { const obj: any = {}; message.config !== undefined && (obj.config = message.config - ? Mongosconfig50.toJSON(message.config) + ? 
Mongosconfig44Enterprise.toJSON(message.config) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9396,13 +9569,15 @@ export const Mongodbspec50_Mongos = { return obj; }, - fromPartial, I>>( + fromPartial, I>>( object: I - ): Mongodbspec50_Mongos { - const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + ): Mongodbspec44Enterprise_Mongos { + const message = { + ...baseMongodbspec44Enterprise_Mongos, + } as Mongodbspec44Enterprise_Mongos; message.config = object.config !== undefined && object.config !== null - ? Mongosconfig50.fromPartial(object.config) + ? Mongosconfig44Enterprise.fromPartial(object.config) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9412,27 +9587,31 @@ export const Mongodbspec50_Mongos = { }, }; -messageTypeRegistry.set(Mongodbspec50_Mongos.$type, Mongodbspec50_Mongos); +messageTypeRegistry.set( + Mongodbspec44Enterprise_Mongos.$type, + Mongodbspec44Enterprise_Mongos +); -const baseMongodbspec50_MongoInfra: object = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra", +const baseMongodbspec44Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoInfra", }; -export const Mongodbspec50_MongoInfra = { - $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra" as const, +export const Mongodbspec44Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec4_4_enterprise.MongoInfra" as const, encode( - message: Mongodbspec50_MongoInfra, + message: Mongodbspec44Enterprise_MongoInfra, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { if (message.configMongos !== undefined) { - Mongosconfig50.encode( + Mongosconfig44Enterprise.encode( message.configMongos, writer.uint32(10).fork() ).ldelim(); } if (message.configMongocfg !== undefined) { - Mongocfgconfig50.encode( + Mongocfgconfig44Enterprise.encode( message.configMongocfg, writer.uint32(18).fork() 
).ldelim(); @@ -9446,20 +9625,23 @@ export const Mongodbspec50_MongoInfra = { decode( input: _m0.Reader | Uint8Array, length?: number - ): Mongodbspec50_MongoInfra { + ): Mongodbspec44Enterprise_MongoInfra { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = { - ...baseMongodbspec50_MongoInfra, - } as Mongodbspec50_MongoInfra; + ...baseMongodbspec44Enterprise_MongoInfra, + } as Mongodbspec44Enterprise_MongoInfra; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { case 1: - message.configMongos = Mongosconfig50.decode(reader, reader.uint32()); + message.configMongos = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); break; case 2: - message.configMongocfg = Mongocfgconfig50.decode( + message.configMongocfg = Mongocfgconfig44Enterprise.decode( reader, reader.uint32() ); @@ -9475,17 +9657,17 @@ export const Mongodbspec50_MongoInfra = { return message; }, - fromJSON(object: any): Mongodbspec50_MongoInfra { + fromJSON(object: any): Mongodbspec44Enterprise_MongoInfra { const message = { - ...baseMongodbspec50_MongoInfra, - } as Mongodbspec50_MongoInfra; + ...baseMongodbspec44Enterprise_MongoInfra, + } as Mongodbspec44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfig50.fromJSON(object.configMongos) + ? Mongosconfig44Enterprise.fromJSON(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfig50.fromJSON(object.configMongocfg) + ? 
Mongocfgconfig44Enterprise.fromJSON(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9494,15 +9676,15 @@ export const Mongodbspec50_MongoInfra = { return message; }, - toJSON(message: Mongodbspec50_MongoInfra): unknown { + toJSON(message: Mongodbspec44Enterprise_MongoInfra): unknown { const obj: any = {}; message.configMongos !== undefined && (obj.configMongos = message.configMongos - ? Mongosconfig50.toJSON(message.configMongos) + ? Mongosconfig44Enterprise.toJSON(message.configMongos) : undefined); message.configMongocfg !== undefined && (obj.configMongocfg = message.configMongocfg - ? Mongocfgconfig50.toJSON(message.configMongocfg) + ? Mongocfgconfig44Enterprise.toJSON(message.configMongocfg) : undefined); message.resources !== undefined && (obj.resources = message.resources @@ -9511,19 +9693,19 @@ export const Mongodbspec50_MongoInfra = { return obj; }, - fromPartial, I>>( - object: I - ): Mongodbspec50_MongoInfra { + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec44Enterprise_MongoInfra { const message = { - ...baseMongodbspec50_MongoInfra, - } as Mongodbspec50_MongoInfra; + ...baseMongodbspec44Enterprise_MongoInfra, + } as Mongodbspec44Enterprise_MongoInfra; message.configMongos = object.configMongos !== undefined && object.configMongos !== null - ? Mongosconfig50.fromPartial(object.configMongos) + ? Mongosconfig44Enterprise.fromPartial(object.configMongos) : undefined; message.configMongocfg = object.configMongocfg !== undefined && object.configMongocfg !== null - ? Mongocfgconfig50.fromPartial(object.configMongocfg) + ? 
Mongocfgconfig44Enterprise.fromPartial(object.configMongocfg) : undefined; message.resources = object.resources !== undefined && object.resources !== null @@ -9534,37 +9716,1161 @@ export const Mongodbspec50_MongoInfra = { }; messageTypeRegistry.set( - Mongodbspec50_MongoInfra.$type, - Mongodbspec50_MongoInfra + Mongodbspec44Enterprise_MongoInfra.$type, + Mongodbspec44Enterprise_MongoInfra ); -const baseConfigSpec: object = { - $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec", - version: "", - featureCompatibilityVersion: "", +const baseMongodbspec50: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0", }; -export const ConfigSpec = { - $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec" as const, +export const Mongodbspec50 = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0" as const, encode( - message: ConfigSpec, + message: Mongodbspec50, writer: _m0.Writer = _m0.Writer.create() ): _m0.Writer { - if (message.version !== "") { - writer.uint32(10).string(message.version); - } - if (message.featureCompatibilityVersion !== "") { - writer.uint32(42).string(message.featureCompatibilityVersion); + if (message.mongod !== undefined) { + Mongodbspec50_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); } - if (message.mongodbSpec36 !== undefined) { - Mongodbspec36.encode( - message.mongodbSpec36, + if (message.mongocfg !== undefined) { + Mongodbspec50_MongoCfg.encode( + message.mongocfg, writer.uint32(18).fork() ).ldelim(); } - if (message.mongodbSpec40 !== undefined) { - Mongodbspec40.encode( + if (message.mongos !== undefined) { + Mongodbspec50_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodbspec50_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Mongodbspec50 { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec50 } as Mongodbspec50; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodbspec50_Mongod.decode(reader, reader.uint32()); + break; + case 2: + message.mongocfg = Mongodbspec50_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodbspec50_Mongos.decode(reader, reader.uint32()); + break; + case 4: + message.mongoinfra = Mongodbspec50_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50 { + const message = { ...baseMongodbspec50 } as Mongodbspec50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodbspec50_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodbspec50_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? Mongodbspec50_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? 
Mongodbspec50_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50 { + const message = { ...baseMongodbspec50 } as Mongodbspec50; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50.$type, Mongodbspec50); + +const baseMongodbspec50_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod", +}; + +export const Mongodbspec50_Mongod = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongod" as const, + + encode( + message: Mongodbspec50_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_Mongod { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_Mongod { + const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfig50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_Mongod { + const message = { ...baseMongodbspec50_Mongod } as Mongodbspec50_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50_Mongod.$type, Mongodbspec50_Mongod); + +const baseMongodbspec50_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg", +}; + +export const Mongodbspec50_MongoCfg = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoCfg" as const, + + encode( + message: Mongodbspec50_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfig50.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_MongoCfg { + const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? 
Mongocfgconfig50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_MongoCfg { + const message = { ...baseMongodbspec50_MongoCfg } as Mongodbspec50_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50_MongoCfg.$type, Mongodbspec50_MongoCfg); + +const baseMongodbspec50_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos", +}; + +export const Mongodbspec50_Mongos = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.Mongos" as const, + + encode( + message: Mongodbspec50_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfig50.encode(message.config, writer.uint32(10).fork()).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_Mongos { + const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfig50.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_Mongos { + const message = { ...baseMongodbspec50_Mongos } as Mongodbspec50_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50_Mongos.$type, Mongodbspec50_Mongos); + +const baseMongodbspec50_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra", +}; + +export const Mongodbspec50_MongoInfra = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0.MongoInfra" as const, + + encode( + message: Mongodbspec50_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfig50.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfig50.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50_MongoInfra, + } as Mongodbspec50_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfig50.decode(reader, reader.uint32()); + break; + case 2: + message.configMongocfg = Mongocfgconfig50.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50_MongoInfra { + const message = { + ...baseMongodbspec50_MongoInfra, + } as Mongodbspec50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? 
Mongosconfig50.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfig50.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfig50.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50_MongoInfra { + const message = { + ...baseMongodbspec50_MongoInfra, + } as Mongodbspec50_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig50.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50_MongoInfra.$type, + Mongodbspec50_MongoInfra +); + +const baseMongodbspec50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise", +}; + +export const Mongodbspec50Enterprise = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise" as const, + + encode( + message: Mongodbspec50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mongod !== undefined) { + Mongodbspec50Enterprise_Mongod.encode( + message.mongod, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.mongocfg !== undefined) { + Mongodbspec50Enterprise_MongoCfg.encode( + message.mongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongos !== undefined) { + Mongodbspec50Enterprise_Mongos.encode( + message.mongos, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.mongoinfra !== undefined) { + Mongodbspec50Enterprise_MongoInfra.encode( + message.mongoinfra, + writer.uint32(34).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise, + } as Mongodbspec50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mongod = Mongodbspec50Enterprise_Mongod.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.mongocfg = Mongodbspec50Enterprise_MongoCfg.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.mongos = Mongodbspec50Enterprise_Mongos.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.mongoinfra = Mongodbspec50Enterprise_MongoInfra.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise { + const message = { + ...baseMongodbspec50Enterprise, + } as Mongodbspec50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50Enterprise_Mongod.fromJSON(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50Enterprise_MongoCfg.fromJSON(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50Enterprise_Mongos.fromJSON(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50Enterprise_MongoInfra.fromJSON(object.mongoinfra) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise): unknown { + const obj: any = {}; + message.mongod !== undefined && + (obj.mongod = message.mongod + ? Mongodbspec50Enterprise_Mongod.toJSON(message.mongod) + : undefined); + message.mongocfg !== undefined && + (obj.mongocfg = message.mongocfg + ? Mongodbspec50Enterprise_MongoCfg.toJSON(message.mongocfg) + : undefined); + message.mongos !== undefined && + (obj.mongos = message.mongos + ? 
Mongodbspec50Enterprise_Mongos.toJSON(message.mongos) + : undefined); + message.mongoinfra !== undefined && + (obj.mongoinfra = message.mongoinfra + ? Mongodbspec50Enterprise_MongoInfra.toJSON(message.mongoinfra) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50Enterprise { + const message = { + ...baseMongodbspec50Enterprise, + } as Mongodbspec50Enterprise; + message.mongod = + object.mongod !== undefined && object.mongod !== null + ? Mongodbspec50Enterprise_Mongod.fromPartial(object.mongod) + : undefined; + message.mongocfg = + object.mongocfg !== undefined && object.mongocfg !== null + ? Mongodbspec50Enterprise_MongoCfg.fromPartial(object.mongocfg) + : undefined; + message.mongos = + object.mongos !== undefined && object.mongos !== null + ? Mongodbspec50Enterprise_Mongos.fromPartial(object.mongos) + : undefined; + message.mongoinfra = + object.mongoinfra !== undefined && object.mongoinfra !== null + ? Mongodbspec50Enterprise_MongoInfra.fromPartial(object.mongoinfra) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Mongodbspec50Enterprise.$type, Mongodbspec50Enterprise); + +const baseMongodbspec50Enterprise_Mongod: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongod", +}; + +export const Mongodbspec50Enterprise_Mongod = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongod" as const, + + encode( + message: Mongodbspec50Enterprise_Mongod, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongodconfig50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_Mongod { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_Mongod, + } as Mongodbspec50Enterprise_Mongod; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_Mongod { + const message = { + ...baseMongodbspec50Enterprise_Mongod, + } as Mongodbspec50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_Mongod): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongodconfig50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50Enterprise_Mongod { + const message = { + ...baseMongodbspec50Enterprise_Mongod, + } as Mongodbspec50Enterprise_Mongod; + message.config = + object.config !== undefined && object.config !== null + ? Mongodconfig50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_Mongod.$type, + Mongodbspec50Enterprise_Mongod +); + +const baseMongodbspec50Enterprise_MongoCfg: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoCfg", +}; + +export const Mongodbspec50Enterprise_MongoCfg = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoCfg" as const, + + encode( + message: Mongodbspec50Enterprise_MongoCfg, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_MongoCfg { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_MongoCfg, + } as Mongodbspec50Enterprise_MongoCfg; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_MongoCfg { + const message = { + ...baseMongodbspec50Enterprise_MongoCfg, + } as Mongodbspec50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_MongoCfg): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongocfgconfig50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec50Enterprise_MongoCfg { + const message = { + ...baseMongodbspec50Enterprise_MongoCfg, + } as Mongodbspec50Enterprise_MongoCfg; + message.config = + object.config !== undefined && object.config !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_MongoCfg.$type, + Mongodbspec50Enterprise_MongoCfg +); + +const baseMongodbspec50Enterprise_Mongos: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongos", +}; + +export const Mongodbspec50Enterprise_Mongos = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.Mongos" as const, + + encode( + message: Mongodbspec50Enterprise_Mongos, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.config !== undefined) { + Mongosconfig50Enterprise.encode( + message.config, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_Mongos { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_Mongos, + } as Mongodbspec50Enterprise_Mongos; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.config = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_Mongos { + const message = { + ...baseMongodbspec50Enterprise_Mongos, + } as Mongodbspec50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50Enterprise.fromJSON(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_Mongos): unknown { + const obj: any = {}; + message.config !== undefined && + (obj.config = message.config + ? Mongosconfig50Enterprise.toJSON(message.config) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodbspec50Enterprise_Mongos { + const message = { + ...baseMongodbspec50Enterprise_Mongos, + } as Mongodbspec50Enterprise_Mongos; + message.config = + object.config !== undefined && object.config !== null + ? Mongosconfig50Enterprise.fromPartial(object.config) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_Mongos.$type, + Mongodbspec50Enterprise_Mongos +); + +const baseMongodbspec50Enterprise_MongoInfra: object = { + $type: "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoInfra", +}; + +export const Mongodbspec50Enterprise_MongoInfra = { + $type: + "yandex.cloud.mdb.mongodb.v1.MongodbSpec5_0_enterprise.MongoInfra" as const, + + encode( + message: Mongodbspec50Enterprise_MongoInfra, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.configMongos !== undefined) { + Mongosconfig50Enterprise.encode( + message.configMongos, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.configMongocfg !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.configMongocfg, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodbspec50Enterprise_MongoInfra { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodbspec50Enterprise_MongoInfra, + } as Mongodbspec50Enterprise_MongoInfra; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.configMongos = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.configMongocfg = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.resources = Resources.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodbspec50Enterprise_MongoInfra { + const message = { + ...baseMongodbspec50Enterprise_MongoInfra, + } as Mongodbspec50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig50Enterprise.fromJSON(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromJSON(object.resources) + : undefined; + return message; + }, + + toJSON(message: Mongodbspec50Enterprise_MongoInfra): unknown { + const obj: any = {}; + message.configMongos !== undefined && + (obj.configMongos = message.configMongos + ? Mongosconfig50Enterprise.toJSON(message.configMongos) + : undefined); + message.configMongocfg !== undefined && + (obj.configMongocfg = message.configMongocfg + ? Mongocfgconfig50Enterprise.toJSON(message.configMongocfg) + : undefined); + message.resources !== undefined && + (obj.resources = message.resources + ? 
Resources.toJSON(message.resources) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodbspec50Enterprise_MongoInfra { + const message = { + ...baseMongodbspec50Enterprise_MongoInfra, + } as Mongodbspec50Enterprise_MongoInfra; + message.configMongos = + object.configMongos !== undefined && object.configMongos !== null + ? Mongosconfig50Enterprise.fromPartial(object.configMongos) + : undefined; + message.configMongocfg = + object.configMongocfg !== undefined && object.configMongocfg !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.configMongocfg) + : undefined; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodbspec50Enterprise_MongoInfra.$type, + Mongodbspec50Enterprise_MongoInfra +); + +const baseConfigSpec: object = { + $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec", + version: "", + featureCompatibilityVersion: "", +}; + +export const ConfigSpec = { + $type: "yandex.cloud.mdb.mongodb.v1.ConfigSpec" as const, + + encode( + message: ConfigSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.version !== "") { + writer.uint32(10).string(message.version); + } + if (message.featureCompatibilityVersion !== "") { + writer.uint32(42).string(message.featureCompatibilityVersion); + } + if (message.mongodbSpec36 !== undefined) { + Mongodbspec36.encode( + message.mongodbSpec36, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.mongodbSpec40 !== undefined) { + Mongodbspec40.encode( message.mongodbSpec40, writer.uint32(34).fork() ).ldelim(); @@ -9587,6 +10893,18 @@ export const ConfigSpec = { writer.uint32(82).fork() ).ldelim(); } + if (message.mongodbSpec44Enterprise !== undefined) { + Mongodbspec44Enterprise.encode( + message.mongodbSpec44Enterprise, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.mongodbSpec50Enterprise 
!== undefined) { + Mongodbspec50Enterprise.encode( + message.mongodbSpec50Enterprise, + writer.uint32(98).fork() + ).ldelim(); + } if (message.backupWindowStart !== undefined) { TimeOfDay.encode( message.backupWindowStart, @@ -9636,6 +10954,18 @@ export const ConfigSpec = { case 10: message.mongodbSpec50 = Mongodbspec50.decode(reader, reader.uint32()); break; + case 11: + message.mongodbSpec44Enterprise = Mongodbspec44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 12: + message.mongodbSpec50Enterprise = Mongodbspec50Enterprise.decode( + reader, + reader.uint32() + ); + break; case 3: message.backupWindowStart = TimeOfDay.decode(reader, reader.uint32()); break; @@ -9687,6 +11017,16 @@ export const ConfigSpec = { object.mongodbSpec_5_0 !== undefined && object.mongodbSpec_5_0 !== null ? Mongodbspec50.fromJSON(object.mongodbSpec_5_0) : undefined; + message.mongodbSpec44Enterprise = + object.mongodbSpec_4_4_enterprise !== undefined && + object.mongodbSpec_4_4_enterprise !== null + ? Mongodbspec44Enterprise.fromJSON(object.mongodbSpec_4_4_enterprise) + : undefined; + message.mongodbSpec50Enterprise = + object.mongodbSpec_5_0_enterprise !== undefined && + object.mongodbSpec_5_0_enterprise !== null + ? Mongodbspec50Enterprise.fromJSON(object.mongodbSpec_5_0_enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null @@ -9729,6 +11069,14 @@ export const ConfigSpec = { (obj.mongodbSpec_5_0 = message.mongodbSpec50 ? Mongodbspec50.toJSON(message.mongodbSpec50) : undefined); + message.mongodbSpec44Enterprise !== undefined && + (obj.mongodbSpec_4_4_enterprise = message.mongodbSpec44Enterprise + ? Mongodbspec44Enterprise.toJSON(message.mongodbSpec44Enterprise) + : undefined); + message.mongodbSpec50Enterprise !== undefined && + (obj.mongodbSpec_5_0_enterprise = message.mongodbSpec50Enterprise + ? 
Mongodbspec50Enterprise.toJSON(message.mongodbSpec50Enterprise) + : undefined); message.backupWindowStart !== undefined && (obj.backupWindowStart = message.backupWindowStart ? TimeOfDay.toJSON(message.backupWindowStart) @@ -9767,6 +11115,16 @@ export const ConfigSpec = { object.mongodbSpec50 !== undefined && object.mongodbSpec50 !== null ? Mongodbspec50.fromPartial(object.mongodbSpec50) : undefined; + message.mongodbSpec44Enterprise = + object.mongodbSpec44Enterprise !== undefined && + object.mongodbSpec44Enterprise !== null + ? Mongodbspec44Enterprise.fromPartial(object.mongodbSpec44Enterprise) + : undefined; + message.mongodbSpec50Enterprise = + object.mongodbSpec50Enterprise !== undefined && + object.mongodbSpec50Enterprise !== null + ? Mongodbspec50Enterprise.fromPartial(object.mongodbSpec50Enterprise) + : undefined; message.backupWindowStart = object.backupWindowStart !== undefined && object.backupWindowStart !== null diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts new file mode 100644 index 00000000..7ccec218 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb4_4_enterprise.ts @@ -0,0 +1,2902 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.mongodb.v1.config"; + +/** + * Configuration of a mongod daemon. Supported options are a limited subset of all + * options described in [MongoDB documentation](https://docs.mongodb.com/v4.4/reference/configuration-options/). + */ +export interface Mongodconfig44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise"; + /** `storage` section of mongod configuration. 
*/ + storage?: Mongodconfig44Enterprise_Storage; + /** `operationProfiling` section of mongod configuration. */ + operationProfiling?: Mongodconfig44Enterprise_OperationProfiling; + /** `net` section of mongod configuration. */ + net?: Mongodconfig44Enterprise_Network; + /** `security` section of mongod configuration. */ + security?: Mongodconfig44Enterprise_Security; + /** `AuditLog` section of mongod configuration. */ + auditLog?: Mongodconfig44Enterprise_AuditLog; + /** `SetParameter` section of mongod configuration. */ + setParameter?: Mongodconfig44Enterprise_SetParameter; +} + +export interface Mongodconfig44Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongodconfig44Enterprise_Storage_WiredTiger; + /** Configuration of the MongoDB [journal](https://docs.mongodb.com/v4.4/reference/glossary/#term-journal). */ + journal?: Mongodconfig44Enterprise_Storage_Journal; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongodconfig44Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + /** Collection configuration for WiredTiger. */ + collectionConfig?: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; +} + +export interface Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. 
*/ + cacheSizeGb?: number; +} + +export interface Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.CollectionConfig"; + /** Default type of compression to use for collection data. */ + blockCompressor: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor; +} + +export enum Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + COMPRESSOR_UNSPECIFIED = 0, + /** NONE - No compression. */ + NONE = 1, + /** SNAPPY - The [Snappy](https://docs.mongodb.com/v4.4/reference/glossary/#term-snappy) compression. */ + SNAPPY = 2, + /** ZLIB - The [zlib](https://docs.mongodb.com/v4.4/reference/glossary/#term-zlib) compression. */ + ZLIB = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object: any +): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + switch (object) { + case 0: + case "COMPRESSOR_UNSPECIFIED": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED; + case 1: + case "NONE": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE; + case 2: + case "SNAPPY": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY; + case 3: + case "ZLIB": + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.UNRECOGNIZED; + } +} + +export function mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + object: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor +): string { + switch (object) { + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED: + return 
"COMPRESSOR_UNSPECIFIED"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE: + return "NONE"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY: + return "SNAPPY"; + case Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: + return "ZLIB"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig44Enterprise_Storage_Journal { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.Journal"; + /** + * Commit interval between journal operations, in milliseconds. + * Default: 100. + */ + commitInterval?: number; +} + +export interface Mongodconfig44Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongodconfig44Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. + */ + slowOpThreshold?: number; +} + +export enum Mongodconfig44Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. 
*/ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig44Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongodconfig44Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongodconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongodconfig44Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongodconfig44Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongodconfig44Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig44Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongodconfig44Enterprise_OperationProfiling_ModeToJSON( + object: Mongodconfig44Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongodconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongodconfig44Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongodconfig44Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongodconfig44Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig44Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Network"; + /** The maximum number of simultaneous connections that mongod will accept. 
*/ + maxIncomingConnections?: number; +} + +export interface Mongodconfig44Enterprise_Security { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security"; + /** If encryption at rest should be enabled or not */ + enableEncryption?: boolean; + /** `kmip` section of mongod security config */ + kmip?: Mongodconfig44Enterprise_Security_KMIP; +} + +export interface Mongodconfig44Enterprise_Security_KMIP { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security.KMIP"; + /** KMIP server name */ + serverName: string; + /** KMIP server port */ + port?: number; + /** KMIP Server CA */ + serverCa: string; + /** KMIP client certificate + private key (unencrypted) */ + clientCertificate: string; + /** KMIP Key identifier (if any) */ + keyIdentifier: string; +} + +export interface Mongodconfig44Enterprise_AuditLog { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.AuditLog"; + /** Audit filter */ + filter: string; +} + +export interface Mongodconfig44Enterprise_SetParameter { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.SetParameter"; + /** Enables the auditing of authorization successes */ + auditAuthorizationSuccess?: boolean; +} + +export interface Mongocfgconfig44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise"; + /** `storage` section of mongocfg configuration. */ + storage?: Mongocfgconfig44Enterprise_Storage; + /** `operationProfiling` section of mongocfg configuration. */ + operationProfiling?: Mongocfgconfig44Enterprise_OperationProfiling; + /** `net` section of mongocfg configuration. */ + net?: Mongocfgconfig44Enterprise_Network; +} + +export interface Mongocfgconfig44Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. 
*/ + wiredTiger?: Mongocfgconfig44Enterprise_Storage_WiredTiger; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongocfgconfig44Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; +} + +export interface Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongocfgconfig44Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongocfgconfig44Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. For details see [MongoDB documentation](https://docs.mongodb.com/v4.4/reference/configuration-options/#operationProfiling.slowOpThresholdMs). + */ + slowOpThreshold?: number; +} + +export enum Mongocfgconfig44Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. 
*/ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongocfgconfig44Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongocfgconfig44Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongocfgconfig44Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongocfgconfig44Enterprise_OperationProfiling_ModeToJSON( + object: Mongocfgconfig44Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongocfgconfig44Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongocfgconfig44Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Network"; + /** The maximum number of simultaneous connections that mongocfg will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongosconfig44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise"; + /** Network settings for mongos. */ + net?: Mongosconfig44Enterprise_Network; +} + +export interface Mongosconfig44Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise.Network"; + /** The maximum number of simultaneous connections that mongos will accept. 
*/ + maxIncomingConnections?: number; +} + +export interface Mongodconfigset44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet4_4_enterprise"; + /** + * Effective mongod settings for a MongoDB 4.4 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongodconfig44Enterprise; + /** User-defined mongod settings for a MongoDB 4.4 cluster. */ + userConfig?: Mongodconfig44Enterprise; + /** Default mongod configuration for a MongoDB 4.4 cluster. */ + defaultConfig?: Mongodconfig44Enterprise; +} + +export interface Mongocfgconfigset44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet4_4_enterprise"; + /** + * Effective mongocfg settings for a MongoDB 4.4 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongocfgconfig44Enterprise; + /** User-defined mongocfg settings for a MongoDB 4.4 cluster. */ + userConfig?: Mongocfgconfig44Enterprise; + /** Default mongocfg configuration for a MongoDB 4.4 cluster. */ + defaultConfig?: Mongocfgconfig44Enterprise; +} + +export interface Mongosconfigset44Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet4_4_enterprise"; + /** + * Effective mongos settings for a MongoDB 4.4 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongosconfig44Enterprise; + /** User-defined mongos settings for a MongoDB 4.4 cluster. */ + userConfig?: Mongosconfig44Enterprise; + /** Default mongos configuration for a MongoDB 4.4 cluster. 
*/ + defaultConfig?: Mongosconfig44Enterprise; +} + +const baseMongodconfig44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise", +}; + +export const Mongodconfig44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise" as const, + + encode( + message: Mongodconfig44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongodconfig44Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongodconfig44Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongodconfig44Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.security !== undefined) { + Mongodconfig44Enterprise_Security.encode( + message.security, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.auditLog !== undefined) { + Mongodconfig44Enterprise_AuditLog.encode( + message.auditLog, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.setParameter !== undefined) { + Mongodconfig44Enterprise_SetParameter.encode( + message.setParameter, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise, + } as Mongodconfig44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongodconfig44Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongodconfig44Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongodconfig44Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.security = Mongodconfig44Enterprise_Security.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.auditLog = Mongodconfig44Enterprise_AuditLog.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.setParameter = Mongodconfig44Enterprise_SetParameter.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise { + const message = { + ...baseMongodconfig44Enterprise, + } as Mongodconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig44Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig44Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig44Enterprise_Network.fromJSON(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig44Enterprise_Security.fromJSON(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? 
Mongodconfig44Enterprise_AuditLog.fromJSON(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig44Enterprise_SetParameter.fromJSON(object.setParameter) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongodconfig44Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongodconfig44Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongodconfig44Enterprise_Network.toJSON(message.net) + : undefined); + message.security !== undefined && + (obj.security = message.security + ? Mongodconfig44Enterprise_Security.toJSON(message.security) + : undefined); + message.auditLog !== undefined && + (obj.auditLog = message.auditLog + ? Mongodconfig44Enterprise_AuditLog.toJSON(message.auditLog) + : undefined); + message.setParameter !== undefined && + (obj.setParameter = message.setParameter + ? Mongodconfig44Enterprise_SetParameter.toJSON(message.setParameter) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig44Enterprise { + const message = { + ...baseMongodconfig44Enterprise, + } as Mongodconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig44Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig44Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? 
Mongodconfig44Enterprise_Network.fromPartial(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig44Enterprise_Security.fromPartial(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig44Enterprise_AuditLog.fromPartial(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig44Enterprise_SetParameter.fromPartial(object.setParameter) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise.$type, + Mongodconfig44Enterprise +); + +const baseMongodconfig44Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage", +}; + +export const Mongodconfig44Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage" as const, + + encode( + message: Mongodconfig44Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongodconfig44Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.journal !== undefined) { + Mongodconfig44Enterprise_Storage_Journal.encode( + message.journal, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage, + } as Mongodconfig44Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongodconfig44Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.journal = Mongodconfig44Enterprise_Storage_Journal.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Storage { + const message = { + ...baseMongodconfig44Enterprise_Storage, + } as Mongodconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig44Enterprise_Storage_Journal.fromJSON(object.journal) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongodconfig44Enterprise_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + message.journal !== undefined && + (obj.journal = message.journal + ? Mongodconfig44Enterprise_Storage_Journal.toJSON(message.journal) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Storage { + const message = { + ...baseMongodconfig44Enterprise_Storage, + } as Mongodconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? 
Mongodconfig44Enterprise_Storage_Journal.fromPartial(object.journal) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage.$type, + Mongodconfig44Enterprise_Storage +); + +const baseMongodconfig44Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger", +}; + +export const Mongodconfig44Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.collectionConfig !== undefined) { + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.encode( + message.collectionConfig, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger, + } as Mongodconfig44Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.collectionConfig = + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger, + } as Mongodconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.fromJSON( + object.collectionConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + message.collectionConfig !== undefined && + (obj.collectionConfig = message.collectionConfig + ? 
Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.toJSON( + message.collectionConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger, + } as Mongodconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.fromPartial( + object.collectionConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_WiredTiger.$type, + Mongodconfig44Enterprise_Storage_WiredTiger +); + +const baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongodconfig44Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig: object = + { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.CollectionConfig", + blockCompressor: 0, + }; + +export const Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.WiredTiger.CollectionConfig" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.blockCompressor !== 0) { + writer.uint32(8).int32(message.blockCompressor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blockCompressor = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = + object.blockCompressor !== undefined && object.blockCompressor !== null + ? 
mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object.blockCompressor + ) + : 0; + return message; + }, + + toJSON( + message: Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig + ): unknown { + const obj: any = {}; + message.blockCompressor !== undefined && + (obj.blockCompressor = + mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + message.blockCompressor + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = object.blockCompressor ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig.$type, + Mongodconfig44Enterprise_Storage_WiredTiger_CollectionConfig +); + +const baseMongodconfig44Enterprise_Storage_Journal: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.Journal", +}; + +export const Mongodconfig44Enterprise_Storage_Journal = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Storage.Journal" as const, + + encode( + message: Mongodconfig44Enterprise_Storage_Journal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.commitInterval !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.commitInterval! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Storage_Journal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Storage_Journal, + } as Mongodconfig44Enterprise_Storage_Journal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.commitInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig44Enterprise_Storage_Journal, + } as Mongodconfig44Enterprise_Storage_Journal; + message.commitInterval = + object.commitInterval !== undefined && object.commitInterval !== null + ? Number(object.commitInterval) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Storage_Journal): unknown { + const obj: any = {}; + message.commitInterval !== undefined && + (obj.commitInterval = message.commitInterval); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig44Enterprise_Storage_Journal, + } as Mongodconfig44Enterprise_Storage_Journal; + message.commitInterval = object.commitInterval ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Storage_Journal.$type, + Mongodconfig44Enterprise_Storage_Journal +); + +const baseMongodconfig44Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongodconfig44Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.OperationProfiling" as const, + + encode( + message: Mongodconfig44Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_OperationProfiling, + } as Mongodconfig44Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig44Enterprise_OperationProfiling, + } as Mongodconfig44Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? 
mongodconfig44Enterprise_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongodconfig44Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig44Enterprise_OperationProfiling, + } as Mongodconfig44Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_OperationProfiling.$type, + Mongodconfig44Enterprise_OperationProfiling +); + +const baseMongodconfig44Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Network", +}; + +export const Mongodconfig44Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Network" as const, + + encode( + message: Mongodconfig44Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Network, + } as Mongodconfig44Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Network { + const message = { + ...baseMongodconfig44Enterprise_Network, + } as Mongodconfig44Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Network { + const message = { + ...baseMongodconfig44Enterprise_Network, + } as Mongodconfig44Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Network.$type, + Mongodconfig44Enterprise_Network +); + +const baseMongodconfig44Enterprise_Security: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security", +}; + +export const Mongodconfig44Enterprise_Security = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security" as const, + + encode( + message: Mongodconfig44Enterprise_Security, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enableEncryption !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableEncryption!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.kmip !== undefined) { + Mongodconfig44Enterprise_Security_KMIP.encode( + message.kmip, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Security { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Security, + } as Mongodconfig44Enterprise_Security; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enableEncryption = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.kmip = Mongodconfig44Enterprise_Security_KMIP.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Security { + const message = { + ...baseMongodconfig44Enterprise_Security, + } as Mongodconfig44Enterprise_Security; + message.enableEncryption = + object.enableEncryption !== undefined && object.enableEncryption !== null + ? 
Boolean(object.enableEncryption) + : undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig44Enterprise_Security_KMIP.fromJSON(object.kmip) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Security): unknown { + const obj: any = {}; + message.enableEncryption !== undefined && + (obj.enableEncryption = message.enableEncryption); + message.kmip !== undefined && + (obj.kmip = message.kmip + ? Mongodconfig44Enterprise_Security_KMIP.toJSON(message.kmip) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Security { + const message = { + ...baseMongodconfig44Enterprise_Security, + } as Mongodconfig44Enterprise_Security; + message.enableEncryption = object.enableEncryption ?? undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig44Enterprise_Security_KMIP.fromPartial(object.kmip) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Security.$type, + Mongodconfig44Enterprise_Security +); + +const baseMongodconfig44Enterprise_Security_KMIP: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security.KMIP", + serverName: "", + serverCa: "", + clientCertificate: "", + keyIdentifier: "", +}; + +export const Mongodconfig44Enterprise_Security_KMIP = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.Security.KMIP" as const, + + encode( + message: Mongodconfig44Enterprise_Security_KMIP, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serverName !== "") { + writer.uint32(10).string(message.serverName); + } + if (message.port !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.port! 
}, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.serverCa !== "") { + writer.uint32(26).string(message.serverCa); + } + if (message.clientCertificate !== "") { + writer.uint32(34).string(message.clientCertificate); + } + if (message.keyIdentifier !== "") { + writer.uint32(42).string(message.keyIdentifier); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_Security_KMIP { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_Security_KMIP, + } as Mongodconfig44Enterprise_Security_KMIP; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serverName = reader.string(); + break; + case 2: + message.port = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.serverCa = reader.string(); + break; + case 4: + message.clientCertificate = reader.string(); + break; + case 5: + message.keyIdentifier = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig44Enterprise_Security_KMIP, + } as Mongodconfig44Enterprise_Security_KMIP; + message.serverName = + object.serverName !== undefined && object.serverName !== null + ? String(object.serverName) + : ""; + message.port = + object.port !== undefined && object.port !== null + ? Number(object.port) + : undefined; + message.serverCa = + object.serverCa !== undefined && object.serverCa !== null + ? String(object.serverCa) + : ""; + message.clientCertificate = + object.clientCertificate !== undefined && + object.clientCertificate !== null + ? 
String(object.clientCertificate) + : ""; + message.keyIdentifier = + object.keyIdentifier !== undefined && object.keyIdentifier !== null + ? String(object.keyIdentifier) + : ""; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_Security_KMIP): unknown { + const obj: any = {}; + message.serverName !== undefined && (obj.serverName = message.serverName); + message.port !== undefined && (obj.port = message.port); + message.serverCa !== undefined && (obj.serverCa = message.serverCa); + message.clientCertificate !== undefined && + (obj.clientCertificate = message.clientCertificate); + message.keyIdentifier !== undefined && + (obj.keyIdentifier = message.keyIdentifier); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig44Enterprise_Security_KMIP, + } as Mongodconfig44Enterprise_Security_KMIP; + message.serverName = object.serverName ?? ""; + message.port = object.port ?? undefined; + message.serverCa = object.serverCa ?? ""; + message.clientCertificate = object.clientCertificate ?? ""; + message.keyIdentifier = object.keyIdentifier ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_Security_KMIP.$type, + Mongodconfig44Enterprise_Security_KMIP +); + +const baseMongodconfig44Enterprise_AuditLog: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.AuditLog", + filter: "", +}; + +export const Mongodconfig44Enterprise_AuditLog = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.AuditLog" as const, + + encode( + message: Mongodconfig44Enterprise_AuditLog, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.filter !== "") { + writer.uint32(10).string(message.filter); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_AuditLog { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_AuditLog, + } as Mongodconfig44Enterprise_AuditLog; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filter = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_AuditLog { + const message = { + ...baseMongodconfig44Enterprise_AuditLog, + } as Mongodconfig44Enterprise_AuditLog; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_AuditLog): unknown { + const obj: any = {}; + message.filter !== undefined && (obj.filter = message.filter); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_AuditLog { + const message = { + ...baseMongodconfig44Enterprise_AuditLog, + } as Mongodconfig44Enterprise_AuditLog; + message.filter = object.filter ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_AuditLog.$type, + Mongodconfig44Enterprise_AuditLog +); + +const baseMongodconfig44Enterprise_SetParameter: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.SetParameter", +}; + +export const Mongodconfig44Enterprise_SetParameter = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig4_4_enterprise.SetParameter" as const, + + encode( + message: Mongodconfig44Enterprise_SetParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.auditAuthorizationSuccess !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.auditAuthorizationSuccess!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig44Enterprise_SetParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig44Enterprise_SetParameter, + } as Mongodconfig44Enterprise_SetParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.auditAuthorizationSuccess = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig44Enterprise_SetParameter { + const message = { + ...baseMongodconfig44Enterprise_SetParameter, + } as Mongodconfig44Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess !== undefined && + object.auditAuthorizationSuccess !== null + ? 
Boolean(object.auditAuthorizationSuccess) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig44Enterprise_SetParameter): unknown { + const obj: any = {}; + message.auditAuthorizationSuccess !== undefined && + (obj.auditAuthorizationSuccess = message.auditAuthorizationSuccess); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig44Enterprise_SetParameter { + const message = { + ...baseMongodconfig44Enterprise_SetParameter, + } as Mongodconfig44Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig44Enterprise_SetParameter.$type, + Mongodconfig44Enterprise_SetParameter +); + +const baseMongocfgconfig44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise", +}; + +export const Mongocfgconfig44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise" as const, + + encode( + message: Mongocfgconfig44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongocfgconfig44Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongocfgconfig44Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongocfgconfig44Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise, + } as Mongocfgconfig44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongocfgconfig44Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongocfgconfig44Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongocfgconfig44Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise { + const message = { + ...baseMongocfgconfig44Enterprise, + } as Mongocfgconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig44Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig44Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig44Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongocfgconfig44Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongocfgconfig44Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? 
Mongocfgconfig44Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig44Enterprise { + const message = { + ...baseMongocfgconfig44Enterprise, + } as Mongocfgconfig44Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig44Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig44Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig44Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise.$type, + Mongocfgconfig44Enterprise +); + +const baseMongocfgconfig44Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage", +}; + +export const Mongocfgconfig44Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage" as const, + + encode( + message: Mongocfgconfig44Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongocfgconfig44Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Storage, + } as Mongocfgconfig44Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongocfgconfig44Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_Storage { + const message = { + ...baseMongocfgconfig44Enterprise_Storage, + } as Mongocfgconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig44Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongocfgconfig44Enterprise_Storage_WiredTiger.toJSON( + message.wiredTiger + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig44Enterprise_Storage { + const message = { + ...baseMongocfgconfig44Enterprise_Storage, + } as Mongocfgconfig44Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? 
Mongocfgconfig44Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Storage.$type, + Mongocfgconfig44Enterprise_Storage +); + +const baseMongocfgconfig44Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger", +}; + +export const Mongocfgconfig44Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongocfgconfig44Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? 
Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig44Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Storage_WiredTiger.$type, + Mongocfgconfig44Enterprise_Storage_WiredTiger +); + +const baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongocfgconfig44Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongocfgconfig44Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongocfgconfig44Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.OperationProfiling" as const, + + encode( + message: Mongocfgconfig44Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_OperationProfiling, + } as Mongocfgconfig44Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig44Enterprise_OperationProfiling, + } as Mongocfgconfig44Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? 
mongocfgconfig44Enterprise_OperationProfiling_ModeFromJSON( + object.mode + ) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongocfgconfig44Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig44Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig44Enterprise_OperationProfiling, + } as Mongocfgconfig44Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_OperationProfiling.$type, + Mongocfgconfig44Enterprise_OperationProfiling +); + +const baseMongocfgconfig44Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Network", +}; + +export const Mongocfgconfig44Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig4_4_enterprise.Network" as const, + + encode( + message: Mongocfgconfig44Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig44Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig44Enterprise_Network, + } as Mongocfgconfig44Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig44Enterprise_Network { + const message = { + ...baseMongocfgconfig44Enterprise_Network, + } as Mongocfgconfig44Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig44Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig44Enterprise_Network { + const message = { + ...baseMongocfgconfig44Enterprise_Network, + } as Mongocfgconfig44Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig44Enterprise_Network.$type, + Mongocfgconfig44Enterprise_Network +); + +const baseMongosconfig44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise", +}; + +export const Mongosconfig44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise" as const, + + encode( + message: Mongosconfig44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.net !== undefined) { + Mongosconfig44Enterprise_Network.encode( + message.net, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfig44Enterprise, + } as Mongosconfig44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.net = Mongosconfig44Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig44Enterprise { + const message = { + ...baseMongosconfig44Enterprise, + } as Mongosconfig44Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig44Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig44Enterprise): unknown { + const obj: any = {}; + message.net !== undefined && + (obj.net = message.net + ? 
Mongosconfig44Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig44Enterprise { + const message = { + ...baseMongosconfig44Enterprise, + } as Mongosconfig44Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig44Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig44Enterprise.$type, + Mongosconfig44Enterprise +); + +const baseMongosconfig44Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise.Network", +}; + +export const Mongosconfig44Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig4_4_enterprise.Network" as const, + + encode( + message: Mongosconfig44Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig44Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfig44Enterprise_Network, + } as Mongosconfig44Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig44Enterprise_Network { + const message = { + ...baseMongosconfig44Enterprise_Network, + } as Mongosconfig44Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig44Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongosconfig44Enterprise_Network { + const message = { + ...baseMongosconfig44Enterprise_Network, + } as Mongosconfig44Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig44Enterprise_Network.$type, + Mongosconfig44Enterprise_Network +); + +const baseMongodconfigset44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet4_4_enterprise", +}; + +export const Mongodconfigset44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet4_4_enterprise" as const, + + encode( + message: Mongodconfigset44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongodconfig44Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongodconfig44Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongodconfig44Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfigset44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfigset44Enterprise, + } as Mongodconfigset44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongodconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfigset44Enterprise { + const message = { + ...baseMongodconfigset44Enterprise, + } as Mongodconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig44Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig44Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig44Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongodconfigset44Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongodconfig44Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongodconfig44Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongodconfig44Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfigset44Enterprise { + const message = { + ...baseMongodconfigset44Enterprise, + } as Mongodconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig44Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig44Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig44Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfigset44Enterprise.$type, + Mongodconfigset44Enterprise +); + +const baseMongocfgconfigset44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet4_4_enterprise", +}; + +export const Mongocfgconfigset44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet4_4_enterprise" as const, + + encode( + message: Mongocfgconfigset44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongocfgconfig44Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongocfgconfig44Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongocfgconfig44Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfigset44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfigset44Enterprise, + } as Mongocfgconfigset44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongocfgconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfigset44Enterprise { + const message = { + ...baseMongocfgconfigset44Enterprise, + } as Mongocfgconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig44Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig44Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig44Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfigset44Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongocfgconfig44Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongocfgconfig44Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongocfgconfig44Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfigset44Enterprise { + const message = { + ...baseMongocfgconfigset44Enterprise, + } as Mongocfgconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig44Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig44Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig44Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfigset44Enterprise.$type, + Mongocfgconfigset44Enterprise +); + +const baseMongosconfigset44Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet4_4_enterprise", +}; + +export const Mongosconfigset44Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet4_4_enterprise" as const, + + encode( + message: Mongosconfigset44Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongosconfig44Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongosconfig44Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongosconfig44Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfigset44Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfigset44Enterprise, + } as Mongosconfigset44Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongosconfig44Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfigset44Enterprise { + const message = { + ...baseMongosconfigset44Enterprise, + } as Mongosconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig44Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig44Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig44Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongosconfigset44Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongosconfig44Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongosconfig44Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongosconfig44Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfigset44Enterprise { + const message = { + ...baseMongosconfigset44Enterprise, + } as Mongosconfigset44Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig44Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig44Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig44Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfigset44Enterprise.$type, + Mongosconfigset44Enterprise +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts new file mode 100644 index 00000000..770cc57c --- /dev/null +++ b/src/generated/yandex/cloud/mdb/mongodb/v1/config/mongodb5_0_enterprise.ts @@ -0,0 +1,2927 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + DoubleValue, + Int64Value, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.mongodb.v1.config"; + +/** + * Configuration of a mongod daemon. Supported options are a limited subset of all + * options described in [MongoDB documentation](https://docs.mongodb.com/v5.0/reference/configuration-options/). + */ +export interface Mongodconfig50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise"; + /** `storage` section of mongod configuration. */ + storage?: Mongodconfig50Enterprise_Storage; + /** `operationProfiling` section of mongod configuration. */ + operationProfiling?: Mongodconfig50Enterprise_OperationProfiling; + /** `net` section of mongod configuration. */ + net?: Mongodconfig50Enterprise_Network; + /** `security` section of mongod configuration. */ + security?: Mongodconfig50Enterprise_Security; + /** `AuditLog` section of mongod configuration. */ + auditLog?: Mongodconfig50Enterprise_AuditLog; + /** `SetParameter` section of mongod configuration. */ + setParameter?: Mongodconfig50Enterprise_SetParameter; +} + +export interface Mongodconfig50Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. 
*/ + wiredTiger?: Mongodconfig50Enterprise_Storage_WiredTiger; + /** Configuration of the MongoDB [journal](https://docs.mongodb.com/v5.0/reference/glossary/#term-journal). */ + journal?: Mongodconfig50Enterprise_Storage_Journal; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongodconfig50Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + /** Collection configuration for WiredTiger. */ + collectionConfig?: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; +} + +export interface Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.CollectionConfig"; + /** Default type of compression to use for collection data. */ + blockCompressor: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor; +} + +export enum Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + COMPRESSOR_UNSPECIFIED = 0, + /** NONE - No compression. */ + NONE = 1, + /** SNAPPY - The [Snappy](https://docs.mongodb.com/v5.0/reference/glossary/#term-snappy) compression. */ + SNAPPY = 2, + /** ZLIB - The [zlib](https://docs.mongodb.com/v5.0/reference/glossary/#term-zlib) compression. 
*/ + ZLIB = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object: any +): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor { + switch (object) { + case 0: + case "COMPRESSOR_UNSPECIFIED": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED; + case 1: + case "NONE": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE; + case 2: + case "SNAPPY": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY; + case 3: + case "ZLIB": + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.UNRECOGNIZED; + } +} + +export function mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + object: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor +): string { + switch (object) { + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.COMPRESSOR_UNSPECIFIED: + return "COMPRESSOR_UNSPECIFIED"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.NONE: + return "NONE"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.SNAPPY: + return "SNAPPY"; + case Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_Compressor.ZLIB: + return "ZLIB"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig50Enterprise_Storage_Journal { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.Journal"; + /** + * Commit interval between journal operations, in milliseconds. + * Default: 100. 
+ */ + commitInterval?: number; +} + +export interface Mongodconfig50Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongodconfig50Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. + */ + slowOpThreshold?: number; +} + +export enum Mongodconfig50Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongodconfig50Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongodconfig50Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongodconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongodconfig50Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongodconfig50Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongodconfig50Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongodconfig50Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongodconfig50Enterprise_OperationProfiling_ModeToJSON( + object: Mongodconfig50Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongodconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongodconfig50Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case 
Mongodconfig50Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongodconfig50Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongodconfig50Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongod will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfig50Enterprise_Security { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security"; + /** If encryption at rest should be enabled or not */ + enableEncryption?: boolean; + /** `kmip` section of mongod security config */ + kmip?: Mongodconfig50Enterprise_Security_KMIP; +} + +export interface Mongodconfig50Enterprise_Security_KMIP { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security.KMIP"; + /** KMIP server name */ + serverName: string; + /** KMIP server port */ + port?: number; + /** KMIP Server CA */ + serverCa: string; + /** KMIP client certificate + private key (unencrypted) */ + clientCertificate: string; + /** KMIP Key identifier (if any) */ + keyIdentifier: string; +} + +export interface Mongodconfig50Enterprise_AuditLog { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.AuditLog"; + /** Audit filter */ + filter: string; + /** Allows runtime configuration of audit filter and auditAuthorizationSuccess */ + runtimeConfiguration?: boolean; +} + +export interface Mongodconfig50Enterprise_SetParameter { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.SetParameter"; + /** Enables the auditing of authorization successes */ + auditAuthorizationSuccess?: boolean; +} + +export interface Mongocfgconfig50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise"; + /** `storage` section of mongocfg configuration. 
*/ + storage?: Mongocfgconfig50Enterprise_Storage; + /** `operationProfiling` section of mongocfg configuration. */ + operationProfiling?: Mongocfgconfig50Enterprise_OperationProfiling; + /** `net` section of mongocfg configuration. */ + net?: Mongocfgconfig50Enterprise_Network; +} + +export interface Mongocfgconfig50Enterprise_Storage { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage"; + /** Configuration of the WiredTiger storage engine. */ + wiredTiger?: Mongocfgconfig50Enterprise_Storage_WiredTiger; +} + +/** Configuration of WiredTiger storage engine. */ +export interface Mongocfgconfig50Enterprise_Storage_WiredTiger { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger"; + /** Engine configuration for WiredTiger. */ + engineConfig?: Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; +} + +export interface Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger.EngineConfig"; + /** The maximum size of the internal cache that WiredTiger will use for all data. */ + cacheSizeGb?: number; +} + +export interface Mongocfgconfig50Enterprise_OperationProfiling { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.OperationProfiling"; + /** Mode which specifies operations that should be profiled. */ + mode: Mongocfgconfig50Enterprise_OperationProfiling_Mode; + /** + * The slow operation time threshold, in milliseconds. Operations that run + * for longer than this threshold are considered slow, and are processed by the profiler + * running in the SLOW_OP mode. For details see [MongoDB documentation](https://docs.mongodb.com/v5.0/reference/configuration-options/#operationProfiling.slowOpThresholdMs). 
+ */ + slowOpThreshold?: number; +} + +export enum Mongocfgconfig50Enterprise_OperationProfiling_Mode { + MODE_UNSPECIFIED = 0, + /** OFF - The profiler is off and does not collect any data. */ + OFF = 1, + /** SLOW_OP - The profiler collects data for operations that take longer than the value of [slow_op_threshold]. */ + SLOW_OP = 2, + /** ALL - The profiler collects data for all operations. */ + ALL = 3, + UNRECOGNIZED = -1, +} + +export function mongocfgconfig50Enterprise_OperationProfiling_ModeFromJSON( + object: any +): Mongocfgconfig50Enterprise_OperationProfiling_Mode { + switch (object) { + case 0: + case "MODE_UNSPECIFIED": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED; + case 1: + case "OFF": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.OFF; + case 2: + case "SLOW_OP": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.SLOW_OP; + case 3: + case "ALL": + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.ALL; + case -1: + case "UNRECOGNIZED": + default: + return Mongocfgconfig50Enterprise_OperationProfiling_Mode.UNRECOGNIZED; + } +} + +export function mongocfgconfig50Enterprise_OperationProfiling_ModeToJSON( + object: Mongocfgconfig50Enterprise_OperationProfiling_Mode +): string { + switch (object) { + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.MODE_UNSPECIFIED: + return "MODE_UNSPECIFIED"; + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.OFF: + return "OFF"; + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.SLOW_OP: + return "SLOW_OP"; + case Mongocfgconfig50Enterprise_OperationProfiling_Mode.ALL: + return "ALL"; + default: + return "UNKNOWN"; + } +} + +export interface Mongocfgconfig50Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongocfg will accept. 
*/ + maxIncomingConnections?: number; +} + +export interface Mongosconfig50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise"; + /** Network settings for mongos. */ + net?: Mongosconfig50Enterprise_Network; +} + +export interface Mongosconfig50Enterprise_Network { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise.Network"; + /** The maximum number of simultaneous connections that mongos will accept. */ + maxIncomingConnections?: number; +} + +export interface Mongodconfigset50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet5_0_enterprise"; + /** + * Effective mongod settings for a MongoDB 5.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongodconfig50Enterprise; + /** User-defined mongod settings for a MongoDB 5.0 cluster. */ + userConfig?: Mongodconfig50Enterprise; + /** Default mongod configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongodconfig50Enterprise; +} + +export interface Mongocfgconfigset50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet5_0_enterprise"; + /** + * Effective mongocfg settings for a MongoDB 5.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongocfgconfig50Enterprise; + /** User-defined mongocfg settings for a MongoDB 5.0 cluster. */ + userConfig?: Mongocfgconfig50Enterprise; + /** Default mongocfg configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongocfgconfig50Enterprise; +} + +export interface Mongosconfigset50Enterprise { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet5_0_enterprise"; + /** + * Effective mongos settings for a MongoDB 5.0 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: Mongosconfig50Enterprise; + /** User-defined mongos settings for a MongoDB 5.0 cluster. 
*/ + userConfig?: Mongosconfig50Enterprise; + /** Default mongos configuration for a MongoDB 5.0 cluster. */ + defaultConfig?: Mongosconfig50Enterprise; +} + +const baseMongodconfig50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise", +}; + +export const Mongodconfig50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise" as const, + + encode( + message: Mongodconfig50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongodconfig50Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongodconfig50Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongodconfig50Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.security !== undefined) { + Mongodconfig50Enterprise_Security.encode( + message.security, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.auditLog !== undefined) { + Mongodconfig50Enterprise_AuditLog.encode( + message.auditLog, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.setParameter !== undefined) { + Mongodconfig50Enterprise_SetParameter.encode( + message.setParameter, + writer.uint32(50).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise, + } as Mongodconfig50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongodconfig50Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongodconfig50Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongodconfig50Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + case 4: + message.security = Mongodconfig50Enterprise_Security.decode( + reader, + reader.uint32() + ); + break; + case 5: + message.auditLog = Mongodconfig50Enterprise_AuditLog.decode( + reader, + reader.uint32() + ); + break; + case 6: + message.setParameter = Mongodconfig50Enterprise_SetParameter.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise { + const message = { + ...baseMongodconfig50Enterprise, + } as Mongodconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig50Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig50Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongodconfig50Enterprise_Network.fromJSON(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig50Enterprise_Security.fromJSON(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? 
Mongodconfig50Enterprise_AuditLog.fromJSON(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig50Enterprise_SetParameter.fromJSON(object.setParameter) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongodconfig50Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongodconfig50Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? Mongodconfig50Enterprise_Network.toJSON(message.net) + : undefined); + message.security !== undefined && + (obj.security = message.security + ? Mongodconfig50Enterprise_Security.toJSON(message.security) + : undefined); + message.auditLog !== undefined && + (obj.auditLog = message.auditLog + ? Mongodconfig50Enterprise_AuditLog.toJSON(message.auditLog) + : undefined); + message.setParameter !== undefined && + (obj.setParameter = message.setParameter + ? Mongodconfig50Enterprise_SetParameter.toJSON(message.setParameter) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfig50Enterprise { + const message = { + ...baseMongodconfig50Enterprise, + } as Mongodconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongodconfig50Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongodconfig50Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? 
Mongodconfig50Enterprise_Network.fromPartial(object.net) + : undefined; + message.security = + object.security !== undefined && object.security !== null + ? Mongodconfig50Enterprise_Security.fromPartial(object.security) + : undefined; + message.auditLog = + object.auditLog !== undefined && object.auditLog !== null + ? Mongodconfig50Enterprise_AuditLog.fromPartial(object.auditLog) + : undefined; + message.setParameter = + object.setParameter !== undefined && object.setParameter !== null + ? Mongodconfig50Enterprise_SetParameter.fromPartial(object.setParameter) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise.$type, + Mongodconfig50Enterprise +); + +const baseMongodconfig50Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage", +}; + +export const Mongodconfig50Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage" as const, + + encode( + message: Mongodconfig50Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongodconfig50Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.journal !== undefined) { + Mongodconfig50Enterprise_Storage_Journal.encode( + message.journal, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage, + } as Mongodconfig50Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongodconfig50Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.journal = Mongodconfig50Enterprise_Storage_Journal.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Storage { + const message = { + ...baseMongodconfig50Enterprise_Storage, + } as Mongodconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? Mongodconfig50Enterprise_Storage_Journal.fromJSON(object.journal) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongodconfig50Enterprise_Storage_WiredTiger.toJSON(message.wiredTiger) + : undefined); + message.journal !== undefined && + (obj.journal = message.journal + ? Mongodconfig50Enterprise_Storage_Journal.toJSON(message.journal) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Storage { + const message = { + ...baseMongodconfig50Enterprise_Storage, + } as Mongodconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + message.journal = + object.journal !== undefined && object.journal !== null + ? 
Mongodconfig50Enterprise_Storage_Journal.fromPartial(object.journal) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage.$type, + Mongodconfig50Enterprise_Storage +); + +const baseMongodconfig50Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger", +}; + +export const Mongodconfig50Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.collectionConfig !== undefined) { + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.encode( + message.collectionConfig, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger, + } as Mongodconfig50Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.collectionConfig = + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger, + } as Mongodconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.fromJSON( + object.collectionConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + message.collectionConfig !== undefined && + (obj.collectionConfig = message.collectionConfig + ? 
Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.toJSON( + message.collectionConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger, + } as Mongodconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + message.collectionConfig = + object.collectionConfig !== undefined && object.collectionConfig !== null + ? Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.fromPartial( + object.collectionConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_WiredTiger.$type, + Mongodconfig50Enterprise_Storage_WiredTiger +); + +const baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongodconfig50Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig: object = + { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.CollectionConfig", + blockCompressor: 0, + }; + +export const Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.WiredTiger.CollectionConfig" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.blockCompressor !== 0) { + writer.uint32(8).int32(message.blockCompressor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.blockCompressor = reader.int32() as any; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = + object.blockCompressor !== undefined && object.blockCompressor !== null + ? 
mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorFromJSON( + object.blockCompressor + ) + : 0; + return message; + }, + + toJSON( + message: Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig + ): unknown { + const obj: any = {}; + message.blockCompressor !== undefined && + (obj.blockCompressor = + mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig_CompressorToJSON( + message.blockCompressor + )); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig { + const message = { + ...baseMongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig, + } as Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig; + message.blockCompressor = object.blockCompressor ?? 0; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig.$type, + Mongodconfig50Enterprise_Storage_WiredTiger_CollectionConfig +); + +const baseMongodconfig50Enterprise_Storage_Journal: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.Journal", +}; + +export const Mongodconfig50Enterprise_Storage_Journal = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Storage.Journal" as const, + + encode( + message: Mongodconfig50Enterprise_Storage_Journal, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.commitInterval !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.commitInterval! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Storage_Journal { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Storage_Journal, + } as Mongodconfig50Enterprise_Storage_Journal; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.commitInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig50Enterprise_Storage_Journal, + } as Mongodconfig50Enterprise_Storage_Journal; + message.commitInterval = + object.commitInterval !== undefined && object.commitInterval !== null + ? Number(object.commitInterval) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Storage_Journal): unknown { + const obj: any = {}; + message.commitInterval !== undefined && + (obj.commitInterval = message.commitInterval); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Storage_Journal { + const message = { + ...baseMongodconfig50Enterprise_Storage_Journal, + } as Mongodconfig50Enterprise_Storage_Journal; + message.commitInterval = object.commitInterval ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Storage_Journal.$type, + Mongodconfig50Enterprise_Storage_Journal +); + +const baseMongodconfig50Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongodconfig50Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.OperationProfiling" as const, + + encode( + message: Mongodconfig50Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_OperationProfiling, + } as Mongodconfig50Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig50Enterprise_OperationProfiling, + } as Mongodconfig50Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? 
mongodconfig50Enterprise_OperationProfiling_ModeFromJSON(object.mode) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongodconfig50Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongodconfig50Enterprise_OperationProfiling, + } as Mongodconfig50Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_OperationProfiling.$type, + Mongodconfig50Enterprise_OperationProfiling +); + +const baseMongodconfig50Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Network", +}; + +export const Mongodconfig50Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Network" as const, + + encode( + message: Mongodconfig50Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Network, + } as Mongodconfig50Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Network { + const message = { + ...baseMongodconfig50Enterprise_Network, + } as Mongodconfig50Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Network { + const message = { + ...baseMongodconfig50Enterprise_Network, + } as Mongodconfig50Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Network.$type, + Mongodconfig50Enterprise_Network +); + +const baseMongodconfig50Enterprise_Security: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security", +}; + +export const Mongodconfig50Enterprise_Security = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security" as const, + + encode( + message: Mongodconfig50Enterprise_Security, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.enableEncryption !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableEncryption!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.kmip !== undefined) { + Mongodconfig50Enterprise_Security_KMIP.encode( + message.kmip, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Security { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Security, + } as Mongodconfig50Enterprise_Security; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.enableEncryption = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.kmip = Mongodconfig50Enterprise_Security_KMIP.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Security { + const message = { + ...baseMongodconfig50Enterprise_Security, + } as Mongodconfig50Enterprise_Security; + message.enableEncryption = + object.enableEncryption !== undefined && object.enableEncryption !== null + ? 
Boolean(object.enableEncryption) + : undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig50Enterprise_Security_KMIP.fromJSON(object.kmip) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Security): unknown { + const obj: any = {}; + message.enableEncryption !== undefined && + (obj.enableEncryption = message.enableEncryption); + message.kmip !== undefined && + (obj.kmip = message.kmip + ? Mongodconfig50Enterprise_Security_KMIP.toJSON(message.kmip) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Security { + const message = { + ...baseMongodconfig50Enterprise_Security, + } as Mongodconfig50Enterprise_Security; + message.enableEncryption = object.enableEncryption ?? undefined; + message.kmip = + object.kmip !== undefined && object.kmip !== null + ? Mongodconfig50Enterprise_Security_KMIP.fromPartial(object.kmip) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Security.$type, + Mongodconfig50Enterprise_Security +); + +const baseMongodconfig50Enterprise_Security_KMIP: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security.KMIP", + serverName: "", + serverCa: "", + clientCertificate: "", + keyIdentifier: "", +}; + +export const Mongodconfig50Enterprise_Security_KMIP = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.Security.KMIP" as const, + + encode( + message: Mongodconfig50Enterprise_Security_KMIP, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.serverName !== "") { + writer.uint32(10).string(message.serverName); + } + if (message.port !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.port! 
}, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.serverCa !== "") { + writer.uint32(26).string(message.serverCa); + } + if (message.clientCertificate !== "") { + writer.uint32(34).string(message.clientCertificate); + } + if (message.keyIdentifier !== "") { + writer.uint32(42).string(message.keyIdentifier); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_Security_KMIP { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_Security_KMIP, + } as Mongodconfig50Enterprise_Security_KMIP; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.serverName = reader.string(); + break; + case 2: + message.port = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.serverCa = reader.string(); + break; + case 4: + message.clientCertificate = reader.string(); + break; + case 5: + message.keyIdentifier = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig50Enterprise_Security_KMIP, + } as Mongodconfig50Enterprise_Security_KMIP; + message.serverName = + object.serverName !== undefined && object.serverName !== null + ? String(object.serverName) + : ""; + message.port = + object.port !== undefined && object.port !== null + ? Number(object.port) + : undefined; + message.serverCa = + object.serverCa !== undefined && object.serverCa !== null + ? String(object.serverCa) + : ""; + message.clientCertificate = + object.clientCertificate !== undefined && + object.clientCertificate !== null + ? 
String(object.clientCertificate) + : ""; + message.keyIdentifier = + object.keyIdentifier !== undefined && object.keyIdentifier !== null + ? String(object.keyIdentifier) + : ""; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_Security_KMIP): unknown { + const obj: any = {}; + message.serverName !== undefined && (obj.serverName = message.serverName); + message.port !== undefined && (obj.port = message.port); + message.serverCa !== undefined && (obj.serverCa = message.serverCa); + message.clientCertificate !== undefined && + (obj.clientCertificate = message.clientCertificate); + message.keyIdentifier !== undefined && + (obj.keyIdentifier = message.keyIdentifier); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_Security_KMIP { + const message = { + ...baseMongodconfig50Enterprise_Security_KMIP, + } as Mongodconfig50Enterprise_Security_KMIP; + message.serverName = object.serverName ?? ""; + message.port = object.port ?? undefined; + message.serverCa = object.serverCa ?? ""; + message.clientCertificate = object.clientCertificate ?? ""; + message.keyIdentifier = object.keyIdentifier ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_Security_KMIP.$type, + Mongodconfig50Enterprise_Security_KMIP +); + +const baseMongodconfig50Enterprise_AuditLog: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.AuditLog", + filter: "", +}; + +export const Mongodconfig50Enterprise_AuditLog = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.AuditLog" as const, + + encode( + message: Mongodconfig50Enterprise_AuditLog, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.filter !== "") { + writer.uint32(10).string(message.filter); + } + if (message.runtimeConfiguration !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.runtimeConfiguration!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_AuditLog { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_AuditLog, + } as Mongodconfig50Enterprise_AuditLog; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filter = reader.string(); + break; + case 2: + message.runtimeConfiguration = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_AuditLog { + const message = { + ...baseMongodconfig50Enterprise_AuditLog, + } as Mongodconfig50Enterprise_AuditLog; + message.filter = + object.filter !== undefined && object.filter !== null + ? String(object.filter) + : ""; + message.runtimeConfiguration = + object.runtimeConfiguration !== undefined && + object.runtimeConfiguration !== null + ? 
Boolean(object.runtimeConfiguration) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_AuditLog): unknown { + const obj: any = {}; + message.filter !== undefined && (obj.filter = message.filter); + message.runtimeConfiguration !== undefined && + (obj.runtimeConfiguration = message.runtimeConfiguration); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_AuditLog { + const message = { + ...baseMongodconfig50Enterprise_AuditLog, + } as Mongodconfig50Enterprise_AuditLog; + message.filter = object.filter ?? ""; + message.runtimeConfiguration = object.runtimeConfiguration ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_AuditLog.$type, + Mongodconfig50Enterprise_AuditLog +); + +const baseMongodconfig50Enterprise_SetParameter: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.SetParameter", +}; + +export const Mongodconfig50Enterprise_SetParameter = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfig5_0_enterprise.SetParameter" as const, + + encode( + message: Mongodconfig50Enterprise_SetParameter, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.auditAuthorizationSuccess !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.auditAuthorizationSuccess!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfig50Enterprise_SetParameter { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfig50Enterprise_SetParameter, + } as Mongodconfig50Enterprise_SetParameter; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.auditAuthorizationSuccess = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfig50Enterprise_SetParameter { + const message = { + ...baseMongodconfig50Enterprise_SetParameter, + } as Mongodconfig50Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess !== undefined && + object.auditAuthorizationSuccess !== null + ? Boolean(object.auditAuthorizationSuccess) + : undefined; + return message; + }, + + toJSON(message: Mongodconfig50Enterprise_SetParameter): unknown { + const obj: any = {}; + message.auditAuthorizationSuccess !== undefined && + (obj.auditAuthorizationSuccess = message.auditAuthorizationSuccess); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongodconfig50Enterprise_SetParameter { + const message = { + ...baseMongodconfig50Enterprise_SetParameter, + } as Mongodconfig50Enterprise_SetParameter; + message.auditAuthorizationSuccess = + object.auditAuthorizationSuccess ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfig50Enterprise_SetParameter.$type, + Mongodconfig50Enterprise_SetParameter +); + +const baseMongocfgconfig50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise", +}; + +export const Mongocfgconfig50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise" as const, + + encode( + message: Mongocfgconfig50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.storage !== undefined) { + Mongocfgconfig50Enterprise_Storage.encode( + message.storage, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.operationProfiling !== undefined) { + Mongocfgconfig50Enterprise_OperationProfiling.encode( + message.operationProfiling, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.net !== undefined) { + Mongocfgconfig50Enterprise_Network.encode( + message.net, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise, + } as Mongocfgconfig50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.storage = Mongocfgconfig50Enterprise_Storage.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.operationProfiling = + Mongocfgconfig50Enterprise_OperationProfiling.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.net = Mongocfgconfig50Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise { + const message = { + ...baseMongocfgconfig50Enterprise, + } as Mongocfgconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig50Enterprise_Storage.fromJSON(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig50Enterprise_OperationProfiling.fromJSON( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig50Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise): unknown { + const obj: any = {}; + message.storage !== undefined && + (obj.storage = message.storage + ? Mongocfgconfig50Enterprise_Storage.toJSON(message.storage) + : undefined); + message.operationProfiling !== undefined && + (obj.operationProfiling = message.operationProfiling + ? Mongocfgconfig50Enterprise_OperationProfiling.toJSON( + message.operationProfiling + ) + : undefined); + message.net !== undefined && + (obj.net = message.net + ? 
Mongocfgconfig50Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfig50Enterprise { + const message = { + ...baseMongocfgconfig50Enterprise, + } as Mongocfgconfig50Enterprise; + message.storage = + object.storage !== undefined && object.storage !== null + ? Mongocfgconfig50Enterprise_Storage.fromPartial(object.storage) + : undefined; + message.operationProfiling = + object.operationProfiling !== undefined && + object.operationProfiling !== null + ? Mongocfgconfig50Enterprise_OperationProfiling.fromPartial( + object.operationProfiling + ) + : undefined; + message.net = + object.net !== undefined && object.net !== null + ? Mongocfgconfig50Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise.$type, + Mongocfgconfig50Enterprise +); + +const baseMongocfgconfig50Enterprise_Storage: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage", +}; + +export const Mongocfgconfig50Enterprise_Storage = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage" as const, + + encode( + message: Mongocfgconfig50Enterprise_Storage, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.wiredTiger !== undefined) { + Mongocfgconfig50Enterprise_Storage_WiredTiger.encode( + message.wiredTiger, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Storage { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Storage, + } as Mongocfgconfig50Enterprise_Storage; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.wiredTiger = + Mongocfgconfig50Enterprise_Storage_WiredTiger.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_Storage { + const message = { + ...baseMongocfgconfig50Enterprise_Storage, + } as Mongocfgconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? Mongocfgconfig50Enterprise_Storage_WiredTiger.fromJSON( + object.wiredTiger + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_Storage): unknown { + const obj: any = {}; + message.wiredTiger !== undefined && + (obj.wiredTiger = message.wiredTiger + ? Mongocfgconfig50Enterprise_Storage_WiredTiger.toJSON( + message.wiredTiger + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig50Enterprise_Storage { + const message = { + ...baseMongocfgconfig50Enterprise_Storage, + } as Mongocfgconfig50Enterprise_Storage; + message.wiredTiger = + object.wiredTiger !== undefined && object.wiredTiger !== null + ? 
Mongocfgconfig50Enterprise_Storage_WiredTiger.fromPartial( + object.wiredTiger + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Storage.$type, + Mongocfgconfig50Enterprise_Storage +); + +const baseMongocfgconfig50Enterprise_Storage_WiredTiger: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger", +}; + +export const Mongocfgconfig50Enterprise_Storage_WiredTiger = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger" as const, + + encode( + message: Mongocfgconfig50Enterprise_Storage_WiredTiger, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.engineConfig !== undefined) { + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.encode( + message.engineConfig, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Storage_WiredTiger { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engineConfig = + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? 
Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromJSON( + object.engineConfig + ) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_Storage_WiredTiger): unknown { + const obj: any = {}; + message.engineConfig !== undefined && + (obj.engineConfig = message.engineConfig + ? Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.toJSON( + message.engineConfig + ) + : undefined); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig50Enterprise_Storage_WiredTiger { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger; + message.engineConfig = + object.engineConfig !== undefined && object.engineConfig !== null + ? Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.fromPartial( + object.engineConfig + ) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Storage_WiredTiger.$type, + Mongocfgconfig50Enterprise_Storage_WiredTiger +); + +const baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger.EngineConfig", +}; + +export const Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Storage.WiredTiger.EngineConfig" as const, + + encode( + message: Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.cacheSizeGb !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.cacheSizeGb! }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cacheSizeGb = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON( + object: any + ): Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = + object.cacheSizeGb !== undefined && object.cacheSizeGb !== null + ? Number(object.cacheSizeGb) + : undefined; + return message; + }, + + toJSON( + message: Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig + ): unknown { + const obj: any = {}; + message.cacheSizeGb !== undefined && + (obj.cacheSizeGb = message.cacheSizeGb); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig { + const message = { + ...baseMongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig, + } as Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig; + message.cacheSizeGb = object.cacheSizeGb ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig.$type, + Mongocfgconfig50Enterprise_Storage_WiredTiger_EngineConfig +); + +const baseMongocfgconfig50Enterprise_OperationProfiling: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.OperationProfiling", + mode: 0, +}; + +export const Mongocfgconfig50Enterprise_OperationProfiling = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.OperationProfiling" as const, + + encode( + message: Mongocfgconfig50Enterprise_OperationProfiling, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.mode !== 0) { + writer.uint32(8).int32(message.mode); + } + if (message.slowOpThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.slowOpThreshold!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_OperationProfiling { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_OperationProfiling, + } as Mongocfgconfig50Enterprise_OperationProfiling; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32() as any; + break; + case 2: + message.slowOpThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig50Enterprise_OperationProfiling, + } as Mongocfgconfig50Enterprise_OperationProfiling; + message.mode = + object.mode !== undefined && object.mode !== null + ? 
mongocfgconfig50Enterprise_OperationProfiling_ModeFromJSON( + object.mode + ) + : 0; + message.slowOpThreshold = + object.slowOpThreshold !== undefined && object.slowOpThreshold !== null + ? Number(object.slowOpThreshold) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_OperationProfiling): unknown { + const obj: any = {}; + message.mode !== undefined && + (obj.mode = mongocfgconfig50Enterprise_OperationProfiling_ModeToJSON( + message.mode + )); + message.slowOpThreshold !== undefined && + (obj.slowOpThreshold = message.slowOpThreshold); + return obj; + }, + + fromPartial< + I extends Exact< + DeepPartial, + I + > + >(object: I): Mongocfgconfig50Enterprise_OperationProfiling { + const message = { + ...baseMongocfgconfig50Enterprise_OperationProfiling, + } as Mongocfgconfig50Enterprise_OperationProfiling; + message.mode = object.mode ?? 0; + message.slowOpThreshold = object.slowOpThreshold ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_OperationProfiling.$type, + Mongocfgconfig50Enterprise_OperationProfiling +); + +const baseMongocfgconfig50Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Network", +}; + +export const Mongocfgconfig50Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfig5_0_enterprise.Network" as const, + + encode( + message: Mongocfgconfig50Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfig50Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfig50Enterprise_Network, + } as Mongocfgconfig50Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfig50Enterprise_Network { + const message = { + ...baseMongocfgconfig50Enterprise_Network, + } as Mongocfgconfig50Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfig50Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongocfgconfig50Enterprise_Network { + const message = { + ...baseMongocfgconfig50Enterprise_Network, + } as Mongocfgconfig50Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfig50Enterprise_Network.$type, + Mongocfgconfig50Enterprise_Network +); + +const baseMongosconfig50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise", +}; + +export const Mongosconfig50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise" as const, + + encode( + message: Mongosconfig50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.net !== undefined) { + Mongosconfig50Enterprise_Network.encode( + message.net, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseMongosconfig50Enterprise, + } as Mongosconfig50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.net = Mongosconfig50Enterprise_Network.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig50Enterprise { + const message = { + ...baseMongosconfig50Enterprise, + } as Mongosconfig50Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig50Enterprise_Network.fromJSON(object.net) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig50Enterprise): unknown { + const obj: any = {}; + message.net !== undefined && + (obj.net = message.net + ? 
Mongosconfig50Enterprise_Network.toJSON(message.net) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfig50Enterprise { + const message = { + ...baseMongosconfig50Enterprise, + } as Mongosconfig50Enterprise; + message.net = + object.net !== undefined && object.net !== null + ? Mongosconfig50Enterprise_Network.fromPartial(object.net) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig50Enterprise.$type, + Mongosconfig50Enterprise +); + +const baseMongosconfig50Enterprise_Network: object = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise.Network", +}; + +export const Mongosconfig50Enterprise_Network = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfig5_0_enterprise.Network" as const, + + encode( + message: Mongosconfig50Enterprise_Network, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxIncomingConnections !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxIncomingConnections!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfig50Enterprise_Network { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfig50Enterprise_Network, + } as Mongosconfig50Enterprise_Network; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxIncomingConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfig50Enterprise_Network { + const message = { + ...baseMongosconfig50Enterprise_Network, + } as Mongosconfig50Enterprise_Network; + message.maxIncomingConnections = + object.maxIncomingConnections !== undefined && + object.maxIncomingConnections !== null + ? Number(object.maxIncomingConnections) + : undefined; + return message; + }, + + toJSON(message: Mongosconfig50Enterprise_Network): unknown { + const obj: any = {}; + message.maxIncomingConnections !== undefined && + (obj.maxIncomingConnections = message.maxIncomingConnections); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Mongosconfig50Enterprise_Network { + const message = { + ...baseMongosconfig50Enterprise_Network, + } as Mongosconfig50Enterprise_Network; + message.maxIncomingConnections = object.maxIncomingConnections ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfig50Enterprise_Network.$type, + Mongosconfig50Enterprise_Network +); + +const baseMongodconfigset50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet5_0_enterprise", +}; + +export const Mongodconfigset50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongodConfigSet5_0_enterprise" as const, + + encode( + message: Mongodconfigset50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongodconfig50Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongodconfig50Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongodconfig50Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongodconfigset50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongodconfigset50Enterprise, + } as Mongodconfigset50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongodconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongodconfigset50Enterprise { + const message = { + ...baseMongodconfigset50Enterprise, + } as Mongodconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig50Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig50Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig50Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongodconfigset50Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongodconfig50Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongodconfig50Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongodconfig50Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongodconfigset50Enterprise { + const message = { + ...baseMongodconfigset50Enterprise, + } as Mongodconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongodconfig50Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongodconfig50Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongodconfig50Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongodconfigset50Enterprise.$type, + Mongodconfigset50Enterprise +); + +const baseMongocfgconfigset50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet5_0_enterprise", +}; + +export const Mongocfgconfigset50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongoCfgConfigSet5_0_enterprise" as const, + + encode( + message: Mongocfgconfigset50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongocfgconfig50Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongocfgconfigset50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongocfgconfigset50Enterprise, + } as Mongocfgconfigset50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongocfgconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongocfgconfigset50Enterprise { + const message = { + ...baseMongocfgconfigset50Enterprise, + } as Mongocfgconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig50Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongocfgconfigset50Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongocfgconfig50Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongocfgconfig50Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongocfgconfig50Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongocfgconfigset50Enterprise { + const message = { + ...baseMongocfgconfigset50Enterprise, + } as Mongocfgconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongocfgconfig50Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongocfgconfigset50Enterprise.$type, + Mongocfgconfigset50Enterprise +); + +const baseMongosconfigset50Enterprise: object = { + $type: "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet5_0_enterprise", +}; + +export const Mongosconfigset50Enterprise = { + $type: + "yandex.cloud.mdb.mongodb.v1.config.MongosConfigSet5_0_enterprise" as const, + + encode( + message: Mongosconfigset50Enterprise, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + Mongosconfig50Enterprise.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + Mongosconfig50Enterprise.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + Mongosconfig50Enterprise.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Mongosconfigset50Enterprise { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseMongosconfigset50Enterprise, + } as Mongosconfigset50Enterprise; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = Mongosconfig50Enterprise.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Mongosconfigset50Enterprise { + const message = { + ...baseMongosconfigset50Enterprise, + } as Mongosconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig50Enterprise.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig50Enterprise.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig50Enterprise.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: Mongosconfigset50Enterprise): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? Mongosconfig50Enterprise.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? Mongosconfig50Enterprise.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
Mongosconfig50Enterprise.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): Mongosconfigset50Enterprise { + const message = { + ...baseMongosconfigset50Enterprise, + } as Mongosconfigset50Enterprise; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? Mongosconfig50Enterprise.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? Mongosconfig50Enterprise.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? Mongosconfig50Enterprise.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Mongosconfigset50Enterprise.$type, + Mongosconfigset50Enterprise +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts b/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts index 1443c308..f0babe72 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/backup.ts @@ -7,8 +7,9 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; /** - * A MySQL backup. For more information, see - * the [documentation](/docs/managed-mysql/concepts/backup). 
+ * An object that represents a MySQL backup. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. */ export interface Backup { $type: "yandex.cloud.mdb.mysql.v1.Backup"; @@ -16,11 +17,11 @@ export interface Backup { id: string; /** ID of the folder that the backup belongs to. */ folderId: string; - /** Comment for API reference generated automatically. */ + /** Creation timestamp (the time when the backup operation was completed). */ createdAt?: Date; - /** ID of the MySQL cluster that the backup was created for. */ + /** ID of the cluster that the backup was created for. */ sourceClusterId: string; - /** Time when the backup operation was started. */ + /** Start timestamp (the time when the backup operation was started). */ startedAt?: Date; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts index ab5d8109..44a19d46 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/backup_service.ts @@ -22,7 +22,8 @@ export interface GetBackupRequest { $type: "yandex.cloud.mdb.mysql.v1.GetBackupRequest"; /** * ID of the backup to return information about. - * To get the backup ID, use a [ClusterService.ListBackups] request. + * + * To get this ID, make a [BackupService.List] request (lists all backups in a folder) or a [ClusterService.ListBackups] request (lists all backups for an existing cluster). */ backupId: string; } @@ -31,31 +32,34 @@ export interface ListBackupsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListBackupsRequest"; /** * ID of the folder to list backups in. - * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; /** - * The maximum number of results per page to return. 
If the number of available - * results is larger than [page_size], the service returns a [ListBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListBackupsResponse.next_page_token] that can be used to get the next page of results in the subsequent [BackupService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, Set [page_token] to the [ListBackupsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListBackupsResponse.next_page_token] returned by the previous [BackupService.List] request. */ pageToken: string; } export interface ListBackupsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListBackupsResponse"; - /** List of MySQL backups. */ + /** List of backups. */ backups: Backup[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value - * for the [ListBackupsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListBackupsRequest.page_size], use the [next_page_token] as the value for the [ListBackupsRequest.page_token] in the subsequent [BackupService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [BackupService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. 
*/ nextPageToken: string; } @@ -292,13 +296,13 @@ export const ListBackupsResponse = { messageTypeRegistry.set(ListBackupsResponse.$type, ListBackupsResponse); -/** A set of methods for managing MySQL backups. */ +/** + * A set of methods for managing MySQL backups. + * + * See [the documentation](/docs/managed-mysql/operations/cluster-backups) for details. + */ export const BackupServiceService = { - /** - * Returns the specified MySQL backup. - * - * To get the list of available MySQL backups, make a [List] request. - */ + /** Retrieves information about the specified backup. */ get: { path: "/yandex.cloud.mdb.mysql.v1.BackupService/Get", requestStream: false, @@ -310,7 +314,11 @@ export const BackupServiceService = { Buffer.from(Backup.encode(value).finish()), responseDeserialize: (value: Buffer) => Backup.decode(value), }, - /** Retrieves the list of MySQL backups available for the specified folder. */ + /** + * Retrieves the list of backups in a folder. + * + * To list backups for an existing cluster, make a [ClusterService.ListBackups] request. + */ list: { path: "/yandex.cloud.mdb.mysql.v1.BackupService/List", requestStream: false, @@ -325,22 +333,18 @@ export const BackupServiceService = { } as const; export interface BackupServiceServer extends UntypedServiceImplementation { + /** Retrieves information about the specified backup. */ + get: handleUnaryCall; /** - * Returns the specified MySQL backup. + * Retrieves the list of backups in a folder. * - * To get the list of available MySQL backups, make a [List] request. + * To list backups for an existing cluster, make a [ClusterService.ListBackups] request. */ - get: handleUnaryCall; - /** Retrieves the list of MySQL backups available for the specified folder. */ list: handleUnaryCall; } export interface BackupServiceClient extends Client { - /** - * Returns the specified MySQL backup. - * - * To get the list of available MySQL backups, make a [List] request. 
- */ + /** Retrieves information about the specified backup. */ get( request: GetBackupRequest, callback: (error: ServiceError | null, response: Backup) => void @@ -356,7 +360,11 @@ export interface BackupServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Backup) => void ): ClientUnaryCall; - /** Retrieves the list of MySQL backups available for the specified folder. */ + /** + * Retrieves the list of backups in a folder. + * + * To list backups for an existing cluster, make a [ClusterService.ListBackups] request. + */ list( request: ListBackupsRequest, callback: ( diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts index 9b57f717..73a796eb 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster.ts @@ -14,64 +14,61 @@ import { Mysqlconfigset80 } from "../../../../../yandex/cloud/mdb/mysql/v1/confi export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; /** - * A MySQL cluster. For more information, see - * the [documentation](/docs/managed-mysql/concepts). + * An object that represents MySQL cluster. + * + * See [the documentation](/docs/managed-mysql/concepts) for details. */ export interface Cluster { $type: "yandex.cloud.mdb.mysql.v1.Cluster"; /** - * ID of the MySQL cluster. - * This ID is assigned by Managed Service for MySQL at creation time. + * ID of the cluster. + * + * This ID is assigned by Yandex Cloud at the time of creation. */ id: string; - /** ID of the folder that the MySQL cluster belongs to. */ + /** ID of the folder that the cluster belongs to. */ folderId: string; + /** Creation timestamp of the cluster. */ createdAt?: Date; - /** - * Name of the MySQL cluster. - * The name must be unique within the folder, comply with RFC 1035 - * and be 1-63 characters long. - */ + /** Name of the cluster. */ name: string; - /** Description of the MySQL cluster. 0-256 characters long. 
*/ + /** Description of the cluster. */ description: string; - /** - * Custom labels for the MySQL cluster as `key:value` pairs. - * Maximum 64 per resource. - */ + /** Custom labels for the cluster as `key:value` pairs. */ labels: { [key: string]: string }; - /** Deployment environment of the MySQL cluster. */ + /** Deployment environment of the cluster. */ environment: Cluster_Environment; - /** Description of monitoring systems relevant to the MySQL cluster. */ + /** Monitoring systems data that is relevant to the cluster. */ monitoring: Monitoring[]; - /** Configuration of the MySQL cluster. */ + /** Configuration of the cluster. */ config?: ClusterConfig; /** ID of the network that the cluster belongs to. */ networkId: string; - /** Aggregated cluster health. */ + /** Aggregated health of the cluster. */ health: Cluster_Health; /** Current state of the cluster. */ status: Cluster_Status; - /** Maintenance window for the cluster. */ + /** Maintenance window settings for the cluster. */ maintenanceWindow?: MaintenanceWindow; /** Planned maintenance operation to be started for the cluster within the nearest [maintenance_window]. */ plannedOperation?: MaintenanceOperation; - /** User security groups */ + /** Effective list of security group IDs applied to the cluster. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; } export enum Cluster_Environment { ENVIRONMENT_UNSPECIFIED = 0, /** - * PRODUCTION - Stable environment with a conservative update policy: - * only hotfixes are applied during regular maintenance. + * PRODUCTION - Environment for stable versions of your apps. + * A conservative update policy is in effect: only bug fixes are applied during regular maintenance. */ PRODUCTION = 1, /** - * PRESTABLE - Environment with more aggressive update policy: new versions - * are rolled out irrespective of backward compatibility. 
+ * PRESTABLE - Environment for testing, including the Managed Service for MySQL itself. + * This environment gets new features, improvements, and bug fixes in the first place, compared to the production environment. + * However, not every update ensures backward compatibility. */ PRESTABLE = 2, UNRECOGNIZED = -1, @@ -109,13 +106,13 @@ export function cluster_EnvironmentToJSON(object: Cluster_Environment): string { } export enum Cluster_Health { - /** HEALTH_UNKNOWN - State of the cluster is unknown ([Host.health] for every host in the cluster is UNKNOWN). */ + /** HEALTH_UNKNOWN - Health of the cluster is unknown ([Host.health] for every host in the cluster is `UNKNOWN`). */ HEALTH_UNKNOWN = 0, - /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is ALIVE). */ + /** ALIVE - Cluster is alive and well ([Host.health] for every host in the cluster is `ALIVE`). */ ALIVE = 1, - /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is DEAD). */ + /** DEAD - Cluster is inoperable ([Host.health] for every host in the cluster is `DEAD`). */ DEAD = 2, - /** DEGRADED - Cluster is working below capacity ([Host.health] for at least one host in the cluster is not ALIVE). */ + /** DEGRADED - Cluster is degraded ([Host.health] for at least one host in the cluster is not `ALIVE`). */ DEGRADED = 3, UNRECOGNIZED = -1, } @@ -169,7 +166,7 @@ export enum Cluster_Status { UPDATING = 4, /** STOPPING - Cluster is stopping. */ STOPPING = 5, - /** STOPPED - Cluster stopped. */ + /** STOPPED - Cluster is stopped. */ STOPPED = 6, /** STARTING - Cluster is starting. */ STARTING = 7, @@ -238,73 +235,74 @@ export interface Cluster_LabelsEntry { value: string; } +/** Cluster-related monitoring system data. */ export interface Monitoring { $type: "yandex.cloud.mdb.mysql.v1.Monitoring"; /** Name of the monitoring system. */ name: string; /** Description of the monitoring system. 
*/ description: string; - /** Link to the monitoring system charts for the MySQL cluster. */ + /** Link to the monitoring system charts for the cluster. */ link: string; } export interface ClusterConfig { $type: "yandex.cloud.mdb.mysql.v1.ClusterConfig"; - /** Version of MySQL server software. */ + /** Version of MySQL used in the cluster. */ version: string; /** Configuration of a MySQL 5.7 server. */ mysqlConfig57?: Mysqlconfigset57 | undefined; /** Configuration of a MySQL 8.0 server. */ mysqlConfig80?: Mysqlconfigset80 | undefined; - /** Resources allocated to MySQL hosts. */ + /** Resource preset for the cluster hosts. */ resources?: Resources; /** Time to start the daily backup, in the UTC timezone. */ backupWindowStart?: TimeOfDay; - /** Access policy to DB */ + /** Access policy for external services. */ access?: Access; + /** Configuration of the performance diagnostics service. */ + performanceDiagnostics?: PerformanceDiagnostics; } export interface Host { $type: "yandex.cloud.mdb.mysql.v1.Host"; /** - * Name of the MySQL host. The host name is assigned by Managed Service for MySQL - * at creation time, and cannot be changed. 1-63 characters long. + * Name of the host. * - * The name is unique across all existing database hosts in Yandex.Cloud, - * as it defines the FQDN of the host. + * This name is assigned by Yandex Cloud at the time of creation. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; - /** - * ID of the MySQL host. The ID is assigned by Managed Service for MySQL - * at creation time. - */ + /** ID of the cluster the host belongs to. */ clusterId: string; - /** ID of the availability zone where the MySQL host resides. */ + /** ID of the availability zone where the host resides. */ zoneId: string; /** Resources allocated to the host. */ resources?: Resources; /** Role of the host in the cluster. */ role: Host_Role; - /** Status code of the aggregated health of the host. 
*/ + /** Aggregated health of the host. */ health: Host_Health; - /** Services provided by the host. */ + /** List of services provided by the host. */ services: Service[]; /** ID of the subnet that the host belongs to. */ subnetId: string; - /** Flag showing public IP assignment status to this host. */ + /** Flag that shows if public IP address is assigned to the host so that the host can be accessed from the internet. */ assignPublicIp: boolean; /** Name of the host to be used as the replication source for cascading replication. */ replicationSource: string; - /** Host backup priority */ + /** Host backup priority. */ backupPriority: number; + /** Host master promotion priority. */ + priority: number; } export enum Host_Role { - /** ROLE_UNKNOWN - Role of the host in the cluster is unknown. */ + /** ROLE_UNKNOWN - Role of the host is unknown. */ ROLE_UNKNOWN = 0, - /** MASTER - Host is the master MySQL server in the cluster. */ + /** MASTER - Host is the master. */ MASTER = 1, - /** REPLICA - Host is a replica MySQL server in the cluster. */ + /** REPLICA - Host is a replica. */ REPLICA = 2, UNRECOGNIZED = -1, } @@ -343,11 +341,11 @@ export function host_RoleToJSON(object: Host_Role): string { export enum Host_Health { /** HEALTH_UNKNOWN - Health of the host is unknown. */ HEALTH_UNKNOWN = 0, - /** ALIVE - The host is performing all its functions normally. */ + /** ALIVE - Host is performing all its functions normally. */ ALIVE = 1, - /** DEAD - The host is inoperable, and cannot perform any of its essential functions. */ + /** DEAD - Host is inoperable, and cannot perform any of its essential functions. */ DEAD = 2, - /** DEGRADED - The host is degraded, and can perform only some of its essential functions. */ + /** DEGRADED - Host is degraded, and can perform only some of its essential functions. 
*/ DEGRADED = 3, UNRECOGNIZED = -1, } @@ -392,7 +390,7 @@ export interface Service { $type: "yandex.cloud.mdb.mysql.v1.Service"; /** Type of the service provided by the host. */ type: Service_Type; - /** Status code of server availability. */ + /** Aggregated health of the service. */ health: Service_Health; } @@ -430,11 +428,11 @@ export function service_TypeToJSON(object: Service_Type): string { } export enum Service_Health { - /** HEALTH_UNKNOWN - Health of the server is unknown. */ + /** HEALTH_UNKNOWN - Health of the service is unknown. */ HEALTH_UNKNOWN = 0, - /** ALIVE - The server is working normally. */ + /** ALIVE - The service is working normally. */ ALIVE = 1, - /** DEAD - The server is dead or unresponsive. */ + /** DEAD - The service is dead or unresponsive. */ DEAD = 2, UNRECOGNIZED = -1, } @@ -470,43 +468,56 @@ export function service_HealthToJSON(object: Service_Health): string { } } +/** Cluster resource preset. */ export interface Resources { $type: "yandex.cloud.mdb.mysql.v1.Resources"; /** - * ID of the preset for computational resources available to a host (CPU, memory etc.). - * All available presets are listed in the [documentation](/docs/managed-mysql/concepts/instance-types). + * ID of the resource preset that defines available computational resources (vCPU, RAM, etc.) for a cluster host. + * + * All available presets are listed in [the documentation](/docs/managed-mysql/concepts/instance-types). */ resourcePresetId: string; - /** Volume of the storage available to a host. */ + /** Volume of the storage (for each cluster host, in bytes). */ diskSize: number; /** - * Type of the storage environment for the host. + * Type of the storage. + * * Possible values: - * * network-ssd - network SSD drive, - * * local-ssd - local SSD storage. + * * `network-hdd` - standard network storage + * * `network-ssd` - fast network storage + * * `network-ssd-nonreplicated` - fast network nonreplicated storage + * * `local-ssd` - fast local storage. 
+ * + * See [the documentation](/docs/managed-mysql/concepts/storage) for details. */ diskTypeId: string; } export interface Access { $type: "yandex.cloud.mdb.mysql.v1.Access"; - /** Allow access for DataLens */ + /** + * Allows access from DataLens. + * + * See [the documentation](/docs/managed-mysql/operations/datalens-connect) for details. + */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex.Cloud management console. + * Allows SQL queries to the cluster databases from Yandex Cloud management console. * - * See [SQL queries in the management console](/docs/managed-mysql/operations/web-sql-query) for more details. + * See [the documentation](/docs/managed-mysql/operations/web-sql-query) for details. */ webSql: boolean; + /** Allow access for DataTransfer. */ + dataTransfer: boolean; } export interface PerformanceDiagnostics { $type: "yandex.cloud.mdb.mysql.v1.PerformanceDiagnostics"; - /** Configuration setting which enables/disables performance diagnostics service in cluster. */ + /** Flag that shows if performance statistics gathering is enabled for the cluster. */ enabled: boolean; - /** Interval (in seconds) for my_session sampling */ + /** Interval (in seconds) for `my_session` sampling. */ sessionsSamplingInterval: number; - /** Interval (in seconds) for my_statements sampling */ + /** Interval (in seconds) for `my_statements` sampling. 
*/ statementsSamplingInterval: number; } @@ -1038,6 +1049,12 @@ export const ClusterConfig = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(42).fork()).ldelim(); } + if (message.performanceDiagnostics !== undefined) { + PerformanceDiagnostics.encode( + message.performanceDiagnostics, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -1072,6 +1089,12 @@ export const ClusterConfig = { case 5: message.access = Access.decode(reader, reader.uint32()); break; + case 7: + message.performanceDiagnostics = PerformanceDiagnostics.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -1107,6 +1130,11 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) + : undefined; return message; }, @@ -1131,6 +1159,10 @@ export const ClusterConfig = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.performanceDiagnostics !== undefined && + (obj.performanceDiagnostics = message.performanceDiagnostics + ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) + : undefined); return obj; }, @@ -1160,6 +1192,11 @@ export const ClusterConfig = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? 
PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) + : undefined; return message; }, }; @@ -1177,6 +1214,7 @@ const baseHost: object = { assignPublicIp: false, replicationSource: "", backupPriority: 0, + priority: 0, }; export const Host = { @@ -1216,6 +1254,9 @@ export const Host = { if (message.backupPriority !== 0) { writer.uint32(88).int64(message.backupPriority); } + if (message.priority !== 0) { + writer.uint32(96).int64(message.priority); + } return writer; }, @@ -1260,6 +1301,9 @@ export const Host = { case 11: message.backupPriority = longToNumber(reader.int64() as Long); break; + case 12: + message.priority = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -1314,6 +1358,10 @@ export const Host = { object.backupPriority !== undefined && object.backupPriority !== null ? Number(object.backupPriority) : 0; + message.priority = + object.priority !== undefined && object.priority !== null + ? Number(object.priority) + : 0; return message; }, @@ -1343,6 +1391,8 @@ export const Host = { (obj.replicationSource = message.replicationSource); message.backupPriority !== undefined && (obj.backupPriority = Math.round(message.backupPriority)); + message.priority !== undefined && + (obj.priority = Math.round(message.priority)); return obj; }, @@ -1363,6 +1413,7 @@ export const Host = { message.assignPublicIp = object.assignPublicIp ?? false; message.replicationSource = object.replicationSource ?? ""; message.backupPriority = object.backupPriority ?? 0; + message.priority = object.priority ?? 
0; return message; }, }; @@ -1537,6 +1588,7 @@ const baseAccess: object = { $type: "yandex.cloud.mdb.mysql.v1.Access", dataLens: false, webSql: false, + dataTransfer: false, }; export const Access = { @@ -1552,6 +1604,9 @@ export const Access = { if (message.webSql === true) { writer.uint32(16).bool(message.webSql); } + if (message.dataTransfer === true) { + writer.uint32(24).bool(message.dataTransfer); + } return writer; }, @@ -1568,6 +1623,9 @@ export const Access = { case 2: message.webSql = reader.bool(); break; + case 3: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1586,6 +1644,10 @@ export const Access = { object.webSql !== undefined && object.webSql !== null ? Boolean(object.webSql) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, @@ -1593,6 +1655,8 @@ export const Access = { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); message.webSql !== undefined && (obj.webSql = message.webSql); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, @@ -1600,6 +1664,7 @@ export const Access = { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; message.webSql = object.webSql ?? false; + message.dataTransfer = object.dataTransfer ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts index a709441a..d99fd4fd 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/cluster_service.ts @@ -20,6 +20,7 @@ import { Cluster_Environment, Resources, Access, + PerformanceDiagnostics, Cluster, Host, cluster_EnvironmentFromJSON, @@ -41,8 +42,9 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.GetClusterRequest"; /** - * ID of the MySQL cluster to return. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to return information about. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; } @@ -50,23 +52,26 @@ export interface GetClusterRequest { export interface ListClustersRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClustersRequest"; /** - * ID of the folder to list MySQL clusters in. - * To get the folder ID, use a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + * ID of the folder to list clusters in. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. */ folderId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClustersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClustersResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.List] requests. */ pageSize: number; /** - * Page token. 
To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClustersResponse.next_page_token] returned by the previous [ClusterService.List] request. */ pageToken: string; /** - * A filter expression that filters resources listed in the response. + * A filter expression that selects clusters listed in the response. + * * The expression must specify: * 1. The field name. Currently you can only use filtering with the [Cluster.name] field. * 2. An `=` operator. @@ -77,45 +82,47 @@ export interface ListClustersRequest { export interface ListClustersResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClustersResponse"; - /** List of MySQL clusters. */ + /** List of clusters. */ clusters: Cluster[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value - * for the [ListClustersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClustersRequest.page_size], use the [next_page_token] as the value for the [ListClustersRequest.page_token] in the subsequent [ClusterService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } export interface CreateClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.CreateClusterRequest"; - /** ID of the folder to create the MySQL cluster in. 
*/ + /** + * ID of the folder to create the cluster in. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ folderId: string; - /** Name of the MySQL cluster. The name must be unique within the folder. */ + /** Name of the cluster. The name must be unique within the folder. */ name: string; - /** Description of the MySQL cluster. */ + /** Description of the cluster. */ description: string; - /** - * Custom labels for the MySQL cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". - */ + /** Custom labels for the cluster as `key:value` pairs. */ labels: { [key: string]: string }; - /** Deployment environment of the MySQL cluster. */ + /** Deployment environment of the cluster. */ environment: Cluster_Environment; - /** Configuration and resources for hosts that should be created for the MySQL cluster. */ + /** Configuration of the cluster. */ configSpec?: ConfigSpec; - /** Descriptions of databases to be created in the MySQL cluster. */ + /** Configuration of databases in the cluster. */ databaseSpecs: DatabaseSpec[]; - /** Descriptions of database users to be created in the MySQL cluster. */ + /** Configuration of database users in the cluster. */ userSpecs: UserSpec[]; - /** Individual configurations for hosts that should be created for the MySQL cluster. */ + /** Configuration of hosts in the cluster. */ hostSpecs: HostSpec[]; /** ID of the network to create the cluster in. */ networkId: string; - /** User security groups */ + /** List of security group IDs to apply to the cluster. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** This option prevents unintended deletion of the cluster. 
*/ deletionProtection: boolean; } @@ -127,38 +134,38 @@ export interface CreateClusterRequest_LabelsEntry { export interface CreateClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.CreateClusterMetadata"; - /** ID of the MySQL cluster that is being created. */ + /** ID of the cluster that is being created. */ clusterId: string; } export interface UpdateClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.UpdateClusterRequest"; /** - * ID of the MySQL cluster to update. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to update. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the MySQL cluster should be updated. */ + /** Field mask that specifies which settings of the cluster should be updated. */ updateMask?: FieldMask; - /** New description of the MySQL cluster. */ + /** New description of the cluster. */ description: string; /** - * Custom labels for the MySQL cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". + * New set of custom labels for the cluster as `key:value` pairs. * - * The new set of labels will completely replace the old ones. To add a label, request the current - * set with the [ClusterService.Get] method, then send an [ClusterService.Update] request with the new label added to the set. + * This set will completely replace the current one. + * To add a label, request the current label set with the [ClusterService.Get] request, then send an [ClusterService.Update] request with the new label added to the current set. */ labels: { [key: string]: string }; - /** New configuration and resources for hosts in the cluster. */ + /** New configuration of the cluster. */ configSpec?: ConfigSpec; - /** New name for the cluster. */ + /** New name of the cluster. */ name: string; - /** New maintenance window settings for the cluster. 
 */ + /** Configuration of a maintenance window in a MySQL cluster. */ maintenanceWindow?: MaintenanceWindow; - /** User security groups */ + /** New list of security group IDs to apply to the cluster. */ securityGroupIds: string[]; - /** Deletion Protection inhibits deletion of the cluster */ + /** This option prevents unintended deletion of the cluster. */ deletionProtection: boolean; } @@ -170,72 +177,69 @@ export interface UpdateClusterRequest_LabelsEntry { export interface UpdateClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata"; - /** ID of the MySQL cluster that is being modified. */ + /** ID of the cluster that is being updated. */ clusterId: string; } export interface DeleteClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterRequest"; /** - * ID of the MySQL cluster to delete. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; } export interface DeleteClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata"; - /** ID of the MySQL cluster that is being deleted. */ + /** ID of the cluster that is being deleted. */ clusterId: string; } export interface BackupClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.BackupClusterRequest"; /** - * ID of the MySQL cluster to back up. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to back up. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; } export interface BackupClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.BackupClusterMetadata"; - /** ID of the cluster that is being backed up. */ + /** ID of the cluster that is being backed up. */ clusterId: string; } export interface RestoreClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.RestoreClusterRequest"; /** - * ID of the backup to create a cluster from. 
- * To get the backup ID, use a [ClusterService.ListBackups] request. + * ID of the backup to restore from. + * + * To get this ID, make a [BackupService.List] request (lists all backups in a folder) or a [ClusterService.ListBackups] request (lists all backups for an existing cluster). */ backupId: string; /** Timestamp of the moment to which the MySQL cluster should be restored. */ time?: Date; - /** Name of the new MySQL cluster. The name must be unique within the folder. */ + /** Name of the new MySQL cluster the backup will be restored to. The name must be unique within the folder. */ name: string; - /** Description of the new MySQL cluster. */ + /** Description of the new cluster. */ description: string; - /** - * Custom labels for the MySQL cluster as `key:value` pairs. Maximum 64 per resource. - * For example, "project": "mvp" or "source": "dictionary". - */ + /** Custom labels for the new cluster as `key:value` pairs. */ labels: { [key: string]: string }; - /** Deployment environment of the new MySQL cluster. */ + /** Deployment environment for the new cluster. */ environment: Cluster_Environment; - /** Configuration for the MySQL cluster to be created. */ + /** Configuration of the new cluster. */ configSpec?: ConfigSpec; - /** - * Configurations for MySQL hosts that should be added - * to the cluster that is being created from the backup. - */ + /** Configuration of hosts in the new cluster. */ hostSpecs: HostSpec[]; - /** ID of the network to create the MySQL cluster in. */ + /** ID of the network to create the new cluster in. */ networkId: string; - /** ID of the folder to create the MySQL cluster in. */ + /** ID of the folder to create the new cluster in. */ folderId: string; - /** User security groups */ + /** List of security group IDs to apply to the new cluster. 
*/ securityGroupIds: string[]; } @@ -247,7 +251,7 @@ export interface RestoreClusterRequest_LabelsEntry { export interface RestoreClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata"; - /** ID of the new MySQL cluster that is being created from a backup. */ + /** ID of the new cluster that is being created from a backup. */ clusterId: string; /** ID of the backup that is being used for creating a cluster. */ backupId: string; @@ -255,25 +259,42 @@ export interface RestoreClusterMetadata { export interface StartClusterFailoverRequest { $type: "yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest"; - /** ID of MySQL cluster. */ + /** + * ID of the cluster to start failover for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** New master host. Switch to the most up-to-date replica if not provided. */ + /** + * Host name to switch master role to. + * If not provided, then the master role is switched to the most up-to-date replica host. + * + * To get this name, make a [ClusterService.ListHosts] request. + */ hostName: string; } export interface StartClusterFailoverMetadata { $type: "yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata"; - /** ID of the MySQL cluster being failovered. */ + /** ID of the cluster that is being failovered. */ clusterId: string; } export interface RescheduleMaintenanceRequest { $type: "yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest"; - /** ID of the MySQL cluster to reschedule the maintenance operation for. */ + /** + * ID of the cluster to reschedule the maintenance operation for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; /** The type of reschedule request. */ rescheduleType: RescheduleMaintenanceRequest_RescheduleType; - /** The time until which this maintenance operation should be delayed. The value should be ahead of the first time when the maintenance operation has been scheduled for no more than two weeks. 
The value can also point to the past moment of time if [reschedule_type.IMMEDIATE] reschedule type is chosen. */ + /** + * The time until which this maintenance operation should be delayed. + * The value should be ahead of the first time when the maintenance operation has been scheduled for no more than two weeks. + * The value can also point to the past moment of time if `IMMEDIATE` reschedule type is chosen. + */ delayedUntil?: Date; } @@ -328,18 +349,18 @@ export function rescheduleMaintenanceRequest_RescheduleTypeToJSON( } } -/** Rescheduled maintenance operation metadata. */ export interface RescheduleMaintenanceMetadata { $type: "yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata"; - /** Required. ID of the MySQL cluster. */ + /** ID of the cluster the maintenance operation is being rescheduled for. */ clusterId: string; - /** Required. The time until which this maintenance operation is to be delayed. */ + /** The time until which this maintenance operation is to be delayed. */ delayedUntil?: Date; } +/** A single log record. */ export interface LogRecord { $type: "yandex.cloud.mdb.mysql.v1.LogRecord"; - /** Log record timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. */ + /** Timestamp of the log record. */ timestamp?: Date; /** Contents of the log record. */ message: { [key: string]: string }; @@ -354,33 +375,44 @@ export interface LogRecord_MessageEntry { export interface ListClusterLogsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest"; /** - * ID of the MySQL cluster to request logs for. - * To get the MySQL cluster ID use a [ClusterService.List] request. + * ID of the cluster to request logs for. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** * Columns from the logs table to request. - * If no columns are specified, entire log records are returned. + * If no columns are specified, complete log records are returned. 
*/ columnFilter: string[]; - /** Type of the service to request logs about. */ + /** The log type. */ serviceType: ListClusterLogsRequest_ServiceType; - /** Start timestamp for the logs request. */ + /** + * Start timestamp for the logs request. + * The logs in the response will be within [from_time] to [to_time] range. + */ fromTime?: Date; - /** End timestamp for the logs request. */ + /** + * End timestamp for the logs request. + * The logs in the response will be within [from_time] to [to_time] range. + */ toTime?: Date; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterLogsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterLogsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListLogs] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the - * [ListClusterLogsResponse.next_page_token] returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClusterLogsResponse.next_page_token] returned by the previous [ClusterService.ListLogs] request. */ pageToken: string; - /** Always return `next_page_token`, even if current page is empty. */ + /** + * Option that controls the behavior of result pagination. + * If it is set to `true`, then [ListClusterLogsResponse.next_page_token] will always be returned, even if the current page is empty. + */ alwaysNextPageToken: boolean; } @@ -447,50 +479,64 @@ export interface ListClusterLogsResponse { /** Requested log records. 
*/ logs: LogRecord[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterLogsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. - * This value is interchangeable with `next_record_token` from StreamLogs method. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterLogsRequest.page_size], use the [next_page_token] as the value for the [ListClusterLogsRequest.page_token] in the subsequent [ClusterService.ListLogs] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListLogs] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. + * + * This value is interchangeable with [StreamLogRecord.next_record_token] from [ClusterService.StreamLogs] method. */ nextPageToken: string; } +/** A single log record in the logs stream. */ export interface StreamLogRecord { $type: "yandex.cloud.mdb.mysql.v1.StreamLogRecord"; /** One of the requested log records. */ record?: LogRecord; /** - * This token allows you to continue streaming logs starting from the exact - * same record. To continue streaming, specify value of `next_record_token` - * as value for `record_token` parameter in the next StreamLogs request. - * This value is interchangeable with `next_page_token` from ListLogs method. + * The token that can be used to continue streaming logs starting from the exact same record. + * To continue streaming, specify value of [next_record_token] as the [StreamClusterLogsRequest.record_token] value in the next [ClusterService.StreamLogs] request. 
 + * + * This value is interchangeable with [ListClusterLogsResponse.next_page_token] from [ClusterService.ListLogs] method. */ nextRecordToken: string; } export interface StreamClusterLogsRequest { $type: "yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest"; - /** Required. ID of the MySQL cluster. */ + /** + * ID of the cluster to stream logs for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** Columns from logs table to get in the response. */ + /** + * Columns from the logs table to request. + * If no columns are specified, complete log records are returned. + */ columnFilter: string[]; + /** The log type. */ serviceType: StreamClusterLogsRequest_ServiceType; /** Start timestamp for the logs request. */ fromTime?: Date; /** * End timestamp for the logs request. - * If this field is not set, all existing logs will be sent and then the new ones as - * they appear. In essence it has 'tail -f' semantics. + * If this field is not set, all existing log records beginning from [from_time] will be returned first, and then the new records will be returned as they appear. + * + * In essence it has `tail -f` command semantics. */ toTime?: Date; /** - * Record token. Set `record_token` to the `next_record_token` returned by a previous StreamLogs - * request to start streaming from next log record. + * Record token that can be used to control logs streaming. + * + * Set [record_token] to the [StreamLogRecord.next_record_token], returned by the previous [ClusterService.StreamLogs] request to start streaming from the next log record. */ recordToken: string; /** - * A filter expression that filters resources listed in the response. + * A filter expression that selects cluster logs listed in the response. + * * The expression must specify: * 1. The field name. Currently filtering can be applied to the [LogRecord.logs.hostname] field. * 2. An `=` operator. 
@@ -560,30 +606,36 @@ export function streamClusterLogsRequest_ServiceTypeToJSON( export interface ListClusterOperationsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest"; - /** ID of the MySQL cluster to list operations for. */ + /** + * ID of the cluster to list operations for. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterOperationsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterOperationsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListOperations] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClusterOperationsResponse.next_page_token] returned by the previous [ClusterService.ListOperations] request. */ pageToken: string; } export interface ListClusterOperationsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse"; - /** List of operations for the specified MySQL cluster. */ + /** List of operations in the cluster. */ operations: Operation[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterOperationsRequest.page_token] query parameter in the next list request. 
- * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterOperationsRequest.page_size], use the [next_page_token] as the value for the [ListClusterOperationsRequest.page_token] in the subsequent [ClusterService.ListOperations] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListOperations] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -591,32 +643,35 @@ export interface ListClusterOperationsResponse { export interface ListClusterBackupsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest"; /** - * ID of the MySQL cluster. - * To get the MySQL cluster ID use a [ClusterService.List] request. + * ID of the cluster to list backups for. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListClusterBackupsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterBackupsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListBackups] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. 
+ * + * To get the next page of results, set [page_token] to the [ListClusterBackupsResponse.next_page_token] returned by the previous [ClusterService.ListBackups] request. */ pageToken: string; } export interface ListClusterBackupsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse"; - /** List of MySQL backups. */ + /** List of the cluster backups. */ backups: Backup[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterBackupsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterBackupsRequest.page_size], use the [next_page_token] as the value for the [ListClusterBackupsRequest.page_token] in the subsequent [ClusterService.ListBackups] request to iterate through multiple pages of results. + * + * Each of the subsequent [ClusterService.ListBackups] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -624,32 +679,35 @@ export interface ListClusterBackupsResponse { export interface ListClusterHostsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest"; /** - * ID of the MySQL cluster. - * To get the MySQL cluster ID use a [ClusterService.List] request. + * ID of the cluster to list hosts for. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. 
If the number of available - * results is larger than [page_size], the service returns a [ListClusterHostsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListClusterHostsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ClusterService.ListHosts] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListClusterHostsResponse.next_page_token] returned by the previous [ClusterService.ListHosts] request. */ pageToken: string; } export interface ListClusterHostsResponse { $type: "yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse"; - /** List of MySQL hosts. */ + /** List of hosts in the cluster. */ hosts: Host[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value - * for the [ListClusterHostsRequest.page_token] query parameter in the next list request. - * Each subsequent list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListClusterHostsRequest.page_size], use the [next_page_token] as the value for the [ListClusterHostsRequest.page_token] in the subsequent [ClusterService.ListHosts] request to iterate through multiple pages of results. 
+ * + * Each of the subsequent [ClusterService.ListHosts] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -657,36 +715,42 @@ export interface ListClusterHostsResponse { export interface AddClusterHostsRequest { $type: "yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest"; /** - * ID of the MySQL cluster to add hosts to. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to add hosts to. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Configurations for MySQL hosts that should be added to the cluster. */ + /** Configuration of the newly added hosts. */ hostSpecs: HostSpec[]; } export interface AddClusterHostsMetadata { $type: "yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata"; - /** ID of the MySQL cluster to which the hosts are being added. */ + /** ID of the cluster to which the hosts are being added. */ clusterId: string; - /** Names of hosts that are being added to the cluster. */ + /** Names of hosts that are being added. */ hostNames: string[]; } export interface DeleteClusterHostsRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest"; /** - * ID of the MySQL cluster to remove hosts from. - * To get the MySQL cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete hosts from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Names of hosts to delete. */ + /** + * Names of hosts to delete. + * + * To get these names, make a [ClusterService.ListHosts] request. + */ hostNames: string[]; } export interface DeleteClusterHostsMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata"; - /** ID of the MySQL cluster to remove hosts from. */ + /** ID of the cluster from which the hosts are being deleted. */ clusterId: string; /** Names of hosts that are being deleted. 
*/ hostNames: string[]; @@ -694,43 +758,59 @@ export interface DeleteClusterHostsMetadata { export interface StartClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.StartClusterRequest"; - /** ID of the MySQL cluster to start. */ + /** + * ID of the cluster to start. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; } export interface StartClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.StartClusterMetadata"; - /** ID of the MySQL cluster being started. */ + /** ID of the cluster that is being started. */ clusterId: string; } export interface StopClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.StopClusterRequest"; - /** ID of the MySQL cluster to stop. */ + /** + * ID of the cluster to stop. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; } export interface StopClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.StopClusterMetadata"; - /** ID of the MySQL cluster being stopped. */ + /** ID of the cluster that is being stopped. */ clusterId: string; } export interface MoveClusterRequest { $type: "yandex.cloud.mdb.mysql.v1.MoveClusterRequest"; - /** ID of the MySQL cluster to move. */ + /** + * ID of the cluster to move. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** ID of the destination folder. */ + /** + * ID of the destination folder. + * + * To get this ID, make a [yandex.cloud.resourcemanager.v1.FolderService.List] request. + */ destinationFolderId: string; } export interface MoveClusterMetadata { $type: "yandex.cloud.mdb.mysql.v1.MoveClusterMetadata"; - /** ID of the MySQL cluster being moved. */ + /** ID of the cluster that is being moved. */ clusterId: string; /** ID of the source folder. */ sourceFolderId: string; - /** ID of the destnation folder. */ + /** ID of the destination folder. 
*/ destinationFolderId: string; } @@ -747,9 +827,9 @@ export interface UpdateClusterHostsRequest { export interface UpdateClusterHostsMetadata { $type: "yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata"; - /** ID of the MySQL cluster to modify hosts in. */ + /** ID of the cluster in which the hosts are being updated. */ clusterId: string; - /** Names of hosts that are being modified. */ + /** Names of hosts that are being updated. */ hostNames: string[]; } @@ -757,71 +837,81 @@ export interface UpdateHostSpec { $type: "yandex.cloud.mdb.mysql.v1.UpdateHostSpec"; /** * Name of the host to update. - * To get the MySQL host name, use a [ClusterService.ListHosts] request. + * To get a MySQL host name, use a [ClusterService.ListHosts] request. */ hostName: string; /** * [Host.name] of the host to be used as the replication source (for cascading replication). - * To get the MySQL host name, use a [ClusterService.ListHosts] request. + * To get a MySQL host name, use a [ClusterService.ListHosts] request. */ replicationSource: string; - /** Field mask that specifies which fields of the MySQL host should be updated. */ + /** Field mask that specifies which settings of the MySQL host should be updated. */ updateMask?: FieldMask; - /** Host backup priority */ + /** Host backup priority. */ backupPriority: number; /** Whether the host should get a public IP address on creation. */ assignPublicIp: boolean; + /** Host master promotion priority. */ + priority: number; } export interface HostSpec { $type: "yandex.cloud.mdb.mysql.v1.HostSpec"; /** * ID of the availability zone where the host resides. - * To get a list of available zones, use the [yandex.cloud.compute.v1.ZoneService.List] request. + * + * To get a list of available zones, make the [yandex.cloud.compute.v1.ZoneService.List] request. */ zoneId: string; /** - * ID of the subnet that the host should belong to. This subnet should be a part - * of the network that the cluster belongs to. 
- * The ID of the network is set in the field [Cluster.network_id]. + * ID of the subnet to assign to the host. + * + * This subnet should be a part of the cluster network (the network ID is specified in the [ClusterService.CreateClusterRequest.network_id]). */ subnetId: string; /** - * Whether the host should get a public IP address on creation. + * Option that enables public IP address for the host so that the host can be accessed from the internet. * - * After a host has been created, this setting cannot be changed. To remove an assigned public IP, or to assign - * a public IP to a host without one, recreate the host with [assign_public_ip] set as needed. + * After a host has been created, this setting cannot be changed. + * To remove an assigned public IP address, or to assign a public IP address to a host without one, recreate the host with the appropriate [assign_public_ip] value set. * * Possible values: - * * false - don't assign a public IP to the host. - * * true - the host should have a public IP address. + * * `false` - don't assign a public IP address to the host. + * * `true` - assign a public IP address to the host. */ assignPublicIp: boolean; /** [Host.name] of the host to be used as the replication source (for cascading replication). */ replicationSource: string; /** Host backup priority */ backupPriority: number; + /** Host master promotion priority */ + priority: number; } export interface ConfigSpec { $type: "yandex.cloud.mdb.mysql.v1.ConfigSpec"; /** * Version of MySQL used in the cluster. - * Possible values: - * * 5.7 - * * 8.0 + * + * Possible values: `5.7`, `8.0`. */ version: string; /** Configuration for a MySQL 5.7 cluster. */ mysqlConfig57?: Mysqlconfig57 | undefined; /** Configuration for a MySQL 8.0 cluster. */ mysqlConfig80?: Mysqlconfig80 | undefined; - /** Resources allocated to MySQL hosts. */ + /** Resource preset for the cluster hosts. */ resources?: Resources; /** Time to start the daily backup, in the UTC timezone. 
*/ backupWindowStart?: TimeOfDay; - /** Access policy to DB */ + /** + * Access policy for external services. + * + * If the specific services need to access the cluster, then set the necessary values in this policy. + */ access?: Access; + /** Configuration of the performance diagnostics service. */ + performanceDiagnostics?: PerformanceDiagnostics; } const baseGetClusterRequest: object = { @@ -5118,6 +5208,7 @@ const baseUpdateHostSpec: object = { replicationSource: "", backupPriority: 0, assignPublicIp: false, + priority: 0, }; export const UpdateHostSpec = { @@ -5142,6 +5233,9 @@ export const UpdateHostSpec = { if (message.assignPublicIp === true) { writer.uint32(40).bool(message.assignPublicIp); } + if (message.priority !== 0) { + writer.uint32(48).int64(message.priority); + } return writer; }, @@ -5167,6 +5261,9 @@ export const UpdateHostSpec = { case 5: message.assignPublicIp = reader.bool(); break; + case 6: + message.priority = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -5198,6 +5295,10 @@ export const UpdateHostSpec = { object.assignPublicIp !== undefined && object.assignPublicIp !== null ? Boolean(object.assignPublicIp) : false; + message.priority = + object.priority !== undefined && object.priority !== null + ? Number(object.priority) + : 0; return message; }, @@ -5214,6 +5315,8 @@ export const UpdateHostSpec = { (obj.backupPriority = Math.round(message.backupPriority)); message.assignPublicIp !== undefined && (obj.assignPublicIp = message.assignPublicIp); + message.priority !== undefined && + (obj.priority = Math.round(message.priority)); return obj; }, @@ -5229,6 +5332,7 @@ export const UpdateHostSpec = { : undefined; message.backupPriority = object.backupPriority ?? 0; message.assignPublicIp = object.assignPublicIp ?? false; + message.priority = object.priority ?? 
0; return message; }, }; @@ -5242,6 +5346,7 @@ const baseHostSpec: object = { assignPublicIp: false, replicationSource: "", backupPriority: 0, + priority: 0, }; export const HostSpec = { @@ -5266,6 +5371,9 @@ export const HostSpec = { if (message.backupPriority !== 0) { writer.uint32(40).int64(message.backupPriority); } + if (message.priority !== 0) { + writer.uint32(48).int64(message.priority); + } return writer; }, @@ -5291,6 +5399,9 @@ export const HostSpec = { case 5: message.backupPriority = longToNumber(reader.int64() as Long); break; + case 6: + message.priority = longToNumber(reader.int64() as Long); + break; default: reader.skipType(tag & 7); break; @@ -5322,6 +5433,10 @@ export const HostSpec = { object.backupPriority !== undefined && object.backupPriority !== null ? Number(object.backupPriority) : 0; + message.priority = + object.priority !== undefined && object.priority !== null + ? Number(object.priority) + : 0; return message; }, @@ -5335,6 +5450,8 @@ export const HostSpec = { (obj.replicationSource = message.replicationSource); message.backupPriority !== undefined && (obj.backupPriority = Math.round(message.backupPriority)); + message.priority !== undefined && + (obj.priority = Math.round(message.priority)); return obj; }, @@ -5345,6 +5462,7 @@ export const HostSpec = { message.assignPublicIp = object.assignPublicIp ?? false; message.replicationSource = object.replicationSource ?? ""; message.backupPriority = object.backupPriority ?? 0; + message.priority = object.priority ?? 
0; return message; }, }; @@ -5390,6 +5508,12 @@ export const ConfigSpec = { if (message.access !== undefined) { Access.encode(message.access, writer.uint32(42).fork()).ldelim(); } + if (message.performanceDiagnostics !== undefined) { + PerformanceDiagnostics.encode( + message.performanceDiagnostics, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -5418,6 +5542,12 @@ export const ConfigSpec = { case 5: message.access = Access.decode(reader, reader.uint32()); break; + case 7: + message.performanceDiagnostics = PerformanceDiagnostics.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -5453,6 +5583,11 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromJSON(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromJSON(object.performanceDiagnostics) + : undefined; return message; }, @@ -5477,6 +5612,10 @@ export const ConfigSpec = { : undefined); message.access !== undefined && (obj.access = message.access ? Access.toJSON(message.access) : undefined); + message.performanceDiagnostics !== undefined && + (obj.performanceDiagnostics = message.performanceDiagnostics + ? PerformanceDiagnostics.toJSON(message.performanceDiagnostics) + : undefined); return obj; }, @@ -5506,6 +5645,11 @@ export const ConfigSpec = { object.access !== undefined && object.access !== null ? Access.fromPartial(object.access) : undefined; + message.performanceDiagnostics = + object.performanceDiagnostics !== undefined && + object.performanceDiagnostics !== null + ? PerformanceDiagnostics.fromPartial(object.performanceDiagnostics) + : undefined; return message; }, }; @@ -5514,11 +5658,7 @@ messageTypeRegistry.set(ConfigSpec.$type, ConfigSpec); /** A set of methods for managing MySQL clusters. 
*/ export const ClusterServiceService = { - /** - * Returns the specified MySQL cluster. - * - * To get the list of available MySQL clusters, make a [List] request. - */ + /** Retrieves information about a cluster. */ get: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Get", requestStream: false, @@ -5530,7 +5670,7 @@ export const ClusterServiceService = { Buffer.from(Cluster.encode(value).finish()), responseDeserialize: (value: Buffer) => Cluster.decode(value), }, - /** Retrieves the list of MySQL clusters that belong to the specified folder. */ + /** Retrieves the list of clusters in a folder. */ list: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/List", requestStream: false, @@ -5542,7 +5682,7 @@ export const ClusterServiceService = { Buffer.from(ListClustersResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListClustersResponse.decode(value), }, - /** Creates a MySQL cluster in the specified folder. */ + /** Creates a cluster in a folder. */ create: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Create", requestStream: false, @@ -5554,7 +5694,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Modifies the specified MySQL cluster. */ + /** Updates a cluster. */ update: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Update", requestStream: false, @@ -5566,7 +5706,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified MySQL cluster. */ + /** Deletes a cluster. */ delete: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Delete", requestStream: false, @@ -5578,7 +5718,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Starts the specified MySQL cluster. 
*/ + /** Starts a cluster. */ start: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Start", requestStream: false, @@ -5590,7 +5730,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Stops the specified MySQL cluster. */ + /** Stops a cluster. */ stop: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Stop", requestStream: false, @@ -5602,7 +5742,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Moves the specified MySQL cluster to the specified folder. */ + /** Moves a cluster to a folder. */ move: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Move", requestStream: false, @@ -5614,7 +5754,11 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Creates a backup for the specified MySQL cluster. */ + /** + * Creates a backup for a cluster. + * + * To get information about a backup, make a [BackupService.Get] request. + */ backup: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Backup", requestStream: false, @@ -5626,7 +5770,11 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Creates a new MySQL cluster using the specified backup. */ + /** + * Restores a backup to a new cluster. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. + */ restore: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/Restore", requestStream: false, @@ -5651,7 +5799,7 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Start a manual failover on the specified MySQL cluster. 
*/ + /** Starts a manual failover for a cluster. */ startFailover: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/StartFailover", requestStream: false, @@ -5664,7 +5812,11 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Retrieves logs for the specified MySQL cluster. */ + /** + * Retrieves logs for a cluster. + * + * Alternatively, logs can be streamed using [StreamLogs]. + */ listLogs: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListLogs", requestStream: false, @@ -5677,7 +5829,11 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterLogsResponse.decode(value), }, - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** + * Retrieves a log stream for a cluster. + * + * This method is similar to [ListLogs], but uses server-side streaming, which allows for the `tail -f` command semantics. + */ streamLogs: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/StreamLogs", requestStream: false, @@ -5690,7 +5846,7 @@ export const ClusterServiceService = { Buffer.from(StreamLogRecord.encode(value).finish()), responseDeserialize: (value: Buffer) => StreamLogRecord.decode(value), }, - /** Retrieves the list of operations for the specified MySQL cluster. */ + /** Retrieves a list of operations for a cluster. */ listOperations: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListOperations", requestStream: false, @@ -5704,7 +5860,11 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterOperationsResponse.decode(value), }, - /** Retrieves the list of available backups for the specified MySQL cluster. */ + /** + * Retrieves a list of backups for a cluster. + * + * To list all backups in a folder, make a [BackupService.List] request. 
+ */ listBackups: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListBackups", requestStream: false, @@ -5718,7 +5878,7 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterBackupsResponse.decode(value), }, - /** Retrieves a list of hosts for the specified MySQL cluster. */ + /** Retrieves a list of hosts for a cluster. */ listHosts: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/ListHosts", requestStream: false, @@ -5732,7 +5892,7 @@ export const ClusterServiceService = { responseDeserialize: (value: Buffer) => ListClusterHostsResponse.decode(value), }, - /** Creates new hosts for a cluster. */ + /** Adds new hosts in a cluster. */ addHosts: { path: "/yandex.cloud.mdb.mysql.v1.ClusterService/AddHosts", requestStream: false, @@ -5773,57 +5933,73 @@ export const ClusterServiceService = { } as const; export interface ClusterServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified MySQL cluster. - * - * To get the list of available MySQL clusters, make a [List] request. - */ + /** Retrieves information about a cluster. */ get: handleUnaryCall; - /** Retrieves the list of MySQL clusters that belong to the specified folder. */ + /** Retrieves the list of clusters in a folder. */ list: handleUnaryCall; - /** Creates a MySQL cluster in the specified folder. */ + /** Creates a cluster in a folder. */ create: handleUnaryCall; - /** Modifies the specified MySQL cluster. */ + /** Updates a cluster. */ update: handleUnaryCall; - /** Deletes the specified MySQL cluster. */ + /** Deletes a cluster. */ delete: handleUnaryCall; - /** Starts the specified MySQL cluster. */ + /** Starts a cluster. */ start: handleUnaryCall; - /** Stops the specified MySQL cluster. */ + /** Stops a cluster. */ stop: handleUnaryCall; - /** Moves the specified MySQL cluster to the specified folder. */ + /** Moves a cluster to a folder. */ move: handleUnaryCall; - /** Creates a backup for the specified MySQL cluster. 
*/ + /** + * Creates a backup for a cluster. + * + * To get information about a backup, make a [BackupService.Get] request. + */ backup: handleUnaryCall; - /** Creates a new MySQL cluster using the specified backup. */ + /** + * Restores a backup to a new cluster. + * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. + */ restore: handleUnaryCall; /** Reschedules planned maintenance operation. */ rescheduleMaintenance: handleUnaryCall< RescheduleMaintenanceRequest, Operation >; - /** Start a manual failover on the specified MySQL cluster. */ + /** Starts a manual failover for a cluster. */ startFailover: handleUnaryCall; - /** Retrieves logs for the specified MySQL cluster. */ + /** + * Retrieves logs for a cluster. + * + * Alternatively, logs can be streamed using [StreamLogs]. + */ listLogs: handleUnaryCall; - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** + * Retrieves a log stream for a cluster. + * + * This method is similar to [ListLogs], but uses server-side streaming, which allows for the `tail -f` command semantics. + */ streamLogs: handleServerStreamingCall< StreamClusterLogsRequest, StreamLogRecord >; - /** Retrieves the list of operations for the specified MySQL cluster. */ + /** Retrieves a list of operations for a cluster. */ listOperations: handleUnaryCall< ListClusterOperationsRequest, ListClusterOperationsResponse >; - /** Retrieves the list of available backups for the specified MySQL cluster. */ + /** + * Retrieves a list of backups for a cluster. + * + * To list all backups in a folder, make a [BackupService.List] request. + */ listBackups: handleUnaryCall< ListClusterBackupsRequest, ListClusterBackupsResponse >; - /** Retrieves a list of hosts for the specified MySQL cluster. */ + /** Retrieves a list of hosts for a cluster. */ listHosts: handleUnaryCall; - /** Creates new hosts for a cluster. */ + /** Adds new hosts in a cluster. 
*/ addHosts: handleUnaryCall; /** Updates the specified hosts. */ updateHosts: handleUnaryCall; @@ -5832,11 +6008,7 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { } export interface ClusterServiceClient extends Client { - /** - * Returns the specified MySQL cluster. - * - * To get the list of available MySQL clusters, make a [List] request. - */ + /** Retrieves information about a cluster. */ get( request: GetClusterRequest, callback: (error: ServiceError | null, response: Cluster) => void @@ -5852,7 +6024,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Cluster) => void ): ClientUnaryCall; - /** Retrieves the list of MySQL clusters that belong to the specified folder. */ + /** Retrieves the list of clusters in a folder. */ list( request: ListClustersRequest, callback: ( @@ -5877,7 +6049,7 @@ export interface ClusterServiceClient extends Client { response: ListClustersResponse ) => void ): ClientUnaryCall; - /** Creates a MySQL cluster in the specified folder. */ + /** Creates a cluster in a folder. */ create( request: CreateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5893,7 +6065,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Modifies the specified MySQL cluster. */ + /** Updates a cluster. */ update( request: UpdateClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5909,7 +6081,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified MySQL cluster. */ + /** Deletes a cluster. 
*/ delete( request: DeleteClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5925,7 +6097,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Starts the specified MySQL cluster. */ + /** Starts a cluster. */ start( request: StartClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5941,7 +6113,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Stops the specified MySQL cluster. */ + /** Stops a cluster. */ stop( request: StopClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5957,7 +6129,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Moves the specified MySQL cluster to the specified folder. */ + /** Moves a cluster to a folder. */ move( request: MoveClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5973,7 +6145,11 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Creates a backup for the specified MySQL cluster. */ + /** + * Creates a backup for a cluster. + * + * To get information about a backup, make a [BackupService.Get] request. + */ backup( request: BackupClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -5989,7 +6165,11 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Creates a new MySQL cluster using the specified backup. */ + /** + * Restores a backup to a new cluster. 
+ * + * See [the documentation](/docs/managed-mysql/concepts/backup) for details. + */ restore( request: RestoreClusterRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -6021,7 +6201,7 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Start a manual failover on the specified MySQL cluster. */ + /** Starts a manual failover for a cluster. */ startFailover( request: StartClusterFailoverRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -6037,7 +6217,11 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Retrieves logs for the specified MySQL cluster. */ + /** + * Retrieves logs for a cluster. + * + * Alternatively, logs can be streamed using [StreamLogs]. + */ listLogs( request: ListClusterLogsRequest, callback: ( @@ -6062,7 +6246,11 @@ export interface ClusterServiceClient extends Client { response: ListClusterLogsResponse ) => void ): ClientUnaryCall; - /** Same as ListLogs but using server-side streaming. Also allows for 'tail -f' semantics. */ + /** + * Retrieves a log stream for a cluster. + * + * This method is similar to [ListLogs], but uses server-side streaming, which allows for the `tail -f` command semantics. + */ streamLogs( request: StreamClusterLogsRequest, options?: Partial @@ -6072,7 +6260,7 @@ export interface ClusterServiceClient extends Client { metadata?: Metadata, options?: Partial ): ClientReadableStream; - /** Retrieves the list of operations for the specified MySQL cluster. */ + /** Retrieves a list of operations for a cluster. 
*/ listOperations( request: ListClusterOperationsRequest, callback: ( @@ -6097,7 +6285,11 @@ export interface ClusterServiceClient extends Client { response: ListClusterOperationsResponse ) => void ): ClientUnaryCall; - /** Retrieves the list of available backups for the specified MySQL cluster. */ + /** + * Retrieves a list of backups for a cluster. + * + * To list all backups in a folder, make a [BackupService.List] request. + */ listBackups( request: ListClusterBackupsRequest, callback: ( @@ -6122,7 +6314,7 @@ export interface ClusterServiceClient extends Client { response: ListClusterBackupsResponse ) => void ): ClientUnaryCall; - /** Retrieves a list of hosts for the specified MySQL cluster. */ + /** Retrieves a list of hosts for a cluster. */ listHosts( request: ListClusterHostsRequest, callback: ( @@ -6147,7 +6339,7 @@ export interface ClusterServiceClient extends Client { response: ListClusterHostsResponse ) => void ): ClientUnaryCall; - /** Creates new hosts for a cluster. */ + /** Adds new hosts in a cluster. */ addHosts( request: AddClusterHostsRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts index 9b008799..d4f88278 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql5_7.ts @@ -10,301 +10,301 @@ import { export const protobufPackage = "yandex.cloud.mdb.mysql.v1.config"; -/** Options and structure of `MysqlConfig5_7` reflects MySQL 5.7 configuration file */ +/** Options and structure of `MysqlConfig5_7` reflects MySQL 5.7 configuration file. */ export interface Mysqlconfig57 { $type: "yandex.cloud.mdb.mysql.v1.config.MysqlConfig5_7"; /** * Size of the InnoDB buffer pool used for caching table and index data. 
* - * For details, see [MySQL documentation for the parameter](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size) for details. */ innodbBufferPoolSize?: number; /** * The maximum permitted number of simultaneous client connections. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_connections) for details. */ maxConnections?: number; /** * Time that it takes to process a query before it is considered slow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_long_query_time) for details. */ longQueryTime?: number; /** * Enable writing of general query log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_general_log). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_general_log) for details. */ generalLog?: boolean; /** * Enable writing of audit log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/mysql-security-excerpt/5.6/en/audit-log-options-variables.html#option_mysqld_audit-log). + * See [MySQL documentation](https://dev.mysql.com/doc/mysql-security-excerpt/5.6/en/audit-log-options-variables.html#option_mysqld_audit-log) for details. */ auditLog?: boolean; /** * Server SQL mode of MySQL. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-setting). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-setting) for details. */ sqlMode: Mysqlconfig57_SQLMode[]; /** * The maximum size in bytes of one packet. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_allowed_packet). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_allowed_packet) for details. */ maxAllowedPacket?: number; /** * Authentication plugin used in the managed MySQL cluster. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_default_authentication_plugin) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_default_authentication_plugin) for details. */ defaultAuthenticationPlugin: Mysqlconfig57_AuthPlugin; /** * Transaction log flush behaviour. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) for details. */ innodbFlushLogAtTrxCommit?: number; /** - * Max time in seconds for a transaction to wait for a row lock + * Max time in seconds for a transaction to wait for a row lock. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) for details. */ innodbLockWaitTimeout?: number; /** * Default transaction isolation level. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation) for details. */ transactionIsolation: Mysqlconfig57_TransactionIsolation; /** - * Print information about deadlocks in error log + * Print information about deadlocks in error log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) for details. */ innodbPrintAllDeadlocks?: boolean; /** * The number of seconds to wait for more data from a connection before aborting the read. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_read_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_read_timeout) for details. */ netReadTimeout?: number; /** * The number of seconds to wait for a block to be written to a connection before aborting the write. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_write_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_net_write_timeout) for details. */ netWriteTimeout?: number; /** * The maximum permitted result length in bytes for the GROUP_CONCAT() function. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_group_concat_max_len) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_group_concat_max_len) for details. */ groupConcatMaxLen?: number; /** * The maximum size of internal in-memory temporary tables. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_tmp_table_size) for details. */ tmpTableSize?: number; /** * This variable sets the maximum size to which user-created MEMORY tables are permitted to grow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_heap_table_size) for details. */ maxHeapTableSize?: number; /** * The servers default time zone. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_default-time-zone) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_default-time-zone) for details. */ defaultTimeZone: string; /** * The servers default character set. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_character_set_server) for details. */ characterSetServer: string; /** * The server default collation. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_collation_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_collation_server) for details. */ collationServer: string; /** - * Enables Innodb adaptive hash index + * Enables InnoDB adaptive hash index. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) for details. */ innodbAdaptiveHashIndex?: boolean; /** * Enables the NUMA interleave memory policy for allocation of the InnoDB buffer pool. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_numa_interleave) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_numa_interleave) for details. */ innodbNumaInterleave?: boolean; /** * The size in bytes of the buffer that InnoDB uses to write to the log files on disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) for details. */ innodbLogBufferSize?: number; /** - * The size in bytes of the single Innodb Redo log file. + * The size in bytes of the single InnoDB Redo log file. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_file_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_log_file_size) for details. 
*/ innodbLogFileSize?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity) for details. */ innodbIoCapacity?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) for details. */ innodbIoCapacityMax?: number; /** * The number of I/O threads for read operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_read_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_read_io_threads) for details. */ innodbReadIoThreads?: number; /** * The number of I/O threads for write operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_write_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_write_io_threads) for details. */ innodbWriteIoThreads?: number; /** * The number of background threads devoted to the InnoDB purge operation. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_purge_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_purge_threads) for details. */ innodbPurgeThreads?: number; /** * Defines the maximum number of threads permitted inside of InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) for details. */ innodbThreadConcurrency?: number; /** - * Limits the max size of InnoDB temp tablespace + * Limits the max size of InnoDB temp tablespace. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) for details. */ innodbTempDataFileMaxSize?: number; /** - * How many threads the server should cache for reuse. + * A number of threads the server should cache for reuse. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_cache_size) for details. */ threadCacheSize?: number; /** * The stack size for each thread. The default is large enough for normal operation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_stack). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_thread_stack) for details. 
*/ threadStack?: number; /** * The minimum size of the buffer that is used for plain index scans, range index scans, and joins that do not use indexes and thus perform full table scans. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_join_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_join_buffer_size) for details. */ joinBufferSize?: number; /** * Each session that must perform a sort allocates a buffer of this size. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sort_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sort_buffer_size) for details. */ sortBufferSize?: number; /** * The number of table definitions that can be stored in the definition cache. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_definition_cache). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_definition_cache) for details. */ tableDefinitionCache?: number; /** * The number of open tables for all threads. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache) for details. */ tableOpenCache?: number; /** * The number of open tables cache instances. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache_instances). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_table_open_cache_instances) for details. */ tableOpenCacheInstances?: number; /** - * This system variable determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. + * Determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp) for details. */ explicitDefaultsForTimestamp?: boolean; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_increment). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_increment) for details. */ autoIncrementIncrement?: number; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_offset). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_auto_increment_offset) for details. */ autoIncrementOffset?: number; /** * Controls how often the MySQL server synchronizes the binary log to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_sync_binlog). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_sync_binlog) for details. */ syncBinlog?: number; /** * The size of the cache to hold changes to the binary log during a transaction. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_cache_size) for details. */ binlogCacheSize?: number; /** * Controls how many microseconds the binary log commit waits before synchronizing the binary log file to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay) for details. */ binlogGroupCommitSyncDelay?: number; /** * For MySQL row-based replication, this variable determines how row images are written to the binary log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_row_image). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_row_image) for details. */ binlogRowImage: Mysqlconfig57_BinlogRowImage; /** * When enabled, it causes the server to write informational log events such as row query log events into its binary log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events) for details. */ binlogRowsQueryLogEvents?: boolean; /** * The number of replica acknowledgments the source must receive per transaction before proceeding. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. */ rplSemiSyncMasterWaitForSlaveCount?: number; /** - * When using a multithreaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. + * When using a multi-threaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_type). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_type) for details. */ slaveParallelType: Mysqlconfig57_SlaveParallelType; /** * Sets the number of applier threads for executing replication transactions in parallel. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_workers). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html#sysvar_slave_parallel_workers) for details. */ slaveParallelWorkers?: number; /** The size of the binary log to hold. 
*/ @@ -312,64 +312,105 @@ export interface Mysqlconfig57 { /** * The number of seconds the server waits for activity on an interactive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_interactive_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_interactive_timeout) for details. */ interactiveTimeout?: number; /** * The number of seconds the server waits for activity on a noninteractive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_wait_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_wait_timeout) for details. */ waitTimeout?: number; /** Replication lag threshold (seconds) which will switch MySQL to 'offline_mode = ON' to prevent users from reading stale data. */ mdbOfflineModeEnableLag?: number; /** * Replication lag threshold (seconds) which will switch MySQL to 'offline_mode = OFF'. - * Should be less than mdb_offline_mode_enable_lag. + * Should be less than mdb_offline_mode_enable_lag value. */ mdbOfflineModeDisableLag?: number; /** * The limit on memory consumption for the range optimizer. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size) for details. */ rangeOptimizerMaxMemSize?: number; /** - * Manages slow query log + * Manages slow query log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log) for details. */ slowQueryLog?: boolean; /** - * Query execution time, after which query to be logged unconditionally, that is, log_slow_rate_limit will not apply to it + * Query execution time, after which query to be logged unconditionally, that is, `log_slow_rate_limit` will not apply to it. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time) for details. */ slowQueryLogAlwaysWriteTime?: number; /** - * Specifies slow log granularity for log_slow_rate_limit: QUERY or SESSION + * Specifies slow log granularity for `log_slow_rate_limit`: QUERY or SESSION. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type) for details. */ logSlowRateType: Mysqlconfig57_LogSlowRateType; /** * Specifies what fraction of session/query should be logged. Logging is enabled for every nth session/query. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit) for details. */ logSlowRateLimit?: number; /** - * When TRUE, statements executed by stored procedures are logged to the slow log + * When TRUE, statements executed by stored procedures are logged to the slow log. 
* - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements) for details. */ logSlowSpStatements?: boolean; /** - * Filters the slow log by the query's execution plan + * Filters the slow log by the query's execution plan. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter) for details. */ logSlowFilter: Mysqlconfig57_LogSlowFilterType[]; + /** + * Replication lag threshold (seconds) which allows replica to be promoted to master while executing "switchover from". + * Should be less than mdb_offline_mode_disable_lag. + */ + mdbPriorityChoiceMaxLag?: number; + /** + * Specifies the page size for InnoDB tablespaces. + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_page_size). + */ + innodbPageSize?: number; + /** + * The limit in bytes on the size of the temporary log files used during online DDL operations + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_online_alter_log_max_size). + */ + innodbOnlineAlterLogMaxSize?: number; + /** + * Minimum length of words that are stored in an InnoDB FULLTEXT index + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_min_token_size). 
+ */ + innodbFtMinTokenSize?: number; + /** + * Maximum length of words that are stored in an InnoDB FULLTEXT index + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_ft_max_token_size). + */ + innodbFtMaxTokenSize?: number; + /** + * Table names storage and comparison strategy + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_lower_case_table_names). + */ + lowerCaseTableNames?: number; + /** + * Manages MySQL 5.6 compatibility + * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_show_compatibility_56). + */ + showCompatibility56?: boolean; } export enum Mysqlconfig57_SQLMode { @@ -1373,6 +1414,66 @@ export const Mysqlconfig57 = { writer.int32(v); } writer.ldelim(); + if (message.mdbPriorityChoiceMaxLag !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mdbPriorityChoiceMaxLag!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.innodbPageSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.innodbPageSize! 
}, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.innodbOnlineAlterLogMaxSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbOnlineAlterLogMaxSize!, + }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.innodbFtMinTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMinTokenSize!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.innodbFtMaxTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMaxTokenSize!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.lowerCaseTableNames !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.lowerCaseTableNames!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.showCompatibility56 !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.showCompatibility56!, + }, + writer.uint32(546).fork() + ).ldelim(); + } return writer; }, @@ -1726,6 +1827,48 @@ export const Mysqlconfig57 = { message.logSlowFilter.push(reader.int32() as any); } break; + case 62: + message.mdbPriorityChoiceMaxLag = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.innodbPageSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.innodbOnlineAlterLogMaxSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.innodbFtMinTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.innodbFtMaxTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.lowerCaseTableNames = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.showCompatibility56 = BoolValue.decode( + reader, + reader.uint32() + ).value; + 
break; default: reader.skipType(tag & 7); break; @@ -2015,6 +2158,40 @@ export const Mysqlconfig57 = { message.logSlowFilter = (object.logSlowFilter ?? []).map((e: any) => mysqlconfig57_LogSlowFilterTypeFromJSON(e) ); + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag !== undefined && + object.mdbPriorityChoiceMaxLag !== null + ? Number(object.mdbPriorityChoiceMaxLag) + : undefined; + message.innodbPageSize = + object.innodbPageSize !== undefined && object.innodbPageSize !== null + ? Number(object.innodbPageSize) + : undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize !== undefined && + object.innodbOnlineAlterLogMaxSize !== null + ? Number(object.innodbOnlineAlterLogMaxSize) + : undefined; + message.innodbFtMinTokenSize = + object.innodbFtMinTokenSize !== undefined && + object.innodbFtMinTokenSize !== null + ? Number(object.innodbFtMinTokenSize) + : undefined; + message.innodbFtMaxTokenSize = + object.innodbFtMaxTokenSize !== undefined && + object.innodbFtMaxTokenSize !== null + ? Number(object.innodbFtMaxTokenSize) + : undefined; + message.lowerCaseTableNames = + object.lowerCaseTableNames !== undefined && + object.lowerCaseTableNames !== null + ? Number(object.lowerCaseTableNames) + : undefined; + message.showCompatibility56 = + object.showCompatibility_56 !== undefined && + object.showCompatibility_56 !== null + ? 
Boolean(object.showCompatibility_56) + : undefined; return message; }, @@ -2158,6 +2335,20 @@ export const Mysqlconfig57 = { } else { obj.logSlowFilter = []; } + message.mdbPriorityChoiceMaxLag !== undefined && + (obj.mdbPriorityChoiceMaxLag = message.mdbPriorityChoiceMaxLag); + message.innodbPageSize !== undefined && + (obj.innodbPageSize = message.innodbPageSize); + message.innodbOnlineAlterLogMaxSize !== undefined && + (obj.innodbOnlineAlterLogMaxSize = message.innodbOnlineAlterLogMaxSize); + message.innodbFtMinTokenSize !== undefined && + (obj.innodbFtMinTokenSize = message.innodbFtMinTokenSize); + message.innodbFtMaxTokenSize !== undefined && + (obj.innodbFtMaxTokenSize = message.innodbFtMaxTokenSize); + message.lowerCaseTableNames !== undefined && + (obj.lowerCaseTableNames = message.lowerCaseTableNames); + message.showCompatibility56 !== undefined && + (obj.showCompatibility_56 = message.showCompatibility56); return obj; }, @@ -2241,6 +2432,15 @@ export const Mysqlconfig57 = { message.logSlowRateLimit = object.logSlowRateLimit ?? undefined; message.logSlowSpStatements = object.logSlowSpStatements ?? undefined; message.logSlowFilter = object.logSlowFilter?.map((e) => e) || []; + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag ?? undefined; + message.innodbPageSize = object.innodbPageSize ?? undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize ?? undefined; + message.innodbFtMinTokenSize = object.innodbFtMinTokenSize ?? undefined; + message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; + message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; + message.showCompatibility56 = object.showCompatibility56 ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts index f79c9405..20d8be1c 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/config/mysql8_0.ts @@ -10,307 +10,307 @@ import { export const protobufPackage = "yandex.cloud.mdb.mysql.v1.config"; -/** Options and structure of `MysqlConfig8_0` reflects MySQL 8.0 configuration file */ +/** Options and structure of `MysqlConfig8_0` reflects MySQL 8.0 configuration file. */ export interface Mysqlconfig80 { $type: "yandex.cloud.mdb.mysql.v1.config.MysqlConfig8_0"; /** * Size of the InnoDB buffer pool used for caching table and index data. * - * For details, see [MySQL documentation for the parameter](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size) for details. */ innodbBufferPoolSize?: number; /** * The maximum permitted number of simultaneous client connections. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_connections). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_connections) for details. */ maxConnections?: number; /** * Time that it takes to process a query before it is considered slow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_long_query_time). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_long_query_time) for details. */ longQueryTime?: number; /** * Enable writing of general query log of MySQL. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_general_log). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_general_log) for details. */ generalLog?: boolean; /** * Enable writing of audit log of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/audit-log-reference.html#audit-log-options-variables). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/audit-log-reference.html#audit-log-options-variables) for details. */ auditLog?: boolean; /** * Server SQL mode of MySQL. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sql-mode-setting). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sql-mode-setting) for details. */ sqlMode: Mysqlconfig80_SQLMode[]; /** * The maximum size in bytes of one packet. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_allowed_packet). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_allowed_packet) for details. */ maxAllowedPacket?: number; /** * Authentication plugin used in the managed MySQL cluster. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_default_authentication_plugin) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_default_authentication_plugin) for details. */ defaultAuthenticationPlugin: Mysqlconfig80_AuthPlugin; /** * Transaction log flush behaviour. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit) for details. */ innodbFlushLogAtTrxCommit?: number; /** - * Max time in seconds for a transaction to wait for a row lock + * Max time in seconds for a transaction to wait for a row lock. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout) for details. */ innodbLockWaitTimeout?: number; /** * Default transaction isolation level. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_transaction_isolation) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_transaction_isolation) for details. */ transactionIsolation: Mysqlconfig80_TransactionIsolation; /** - * Print information about deadlocks in error log + * Print information about deadlocks in error log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_print_all_deadlocks) for details. */ innodbPrintAllDeadlocks?: boolean; /** * The number of seconds to wait for more data from a connection before aborting the read. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_read_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_read_timeout) for details. */ netReadTimeout?: number; /** * The number of seconds to wait for a block to be written to a connection before aborting the write. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_write_timeout) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_net_write_timeout) for details. */ netWriteTimeout?: number; /** * The maximum permitted result length in bytes for the GROUP_CONCAT() function. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len) for details. */ groupConcatMaxLen?: number; /** * The maximum size of internal in-memory temporary tables. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size) for details. */ tmpTableSize?: number; /** * This variable sets the maximum size to which user-created MEMORY tables are permitted to grow. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_heap_table_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_max_heap_table_size) for details. 
*/ maxHeapTableSize?: number; /** * The servers default time zone. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-options.html#option_mysqld_default-time-zone) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-options.html#option_mysqld_default-time-zone) for details. */ defaultTimeZone: string; /** * The servers default character set. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_character_set_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_character_set_server) for details. */ characterSetServer: string; /** * The server default collation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_collation_server) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_collation_server) for details. */ collationServer: string; /** - * Enables Innodb adaptive hash index + * Enables InnoDB adaptive hash index. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_adaptive_hash_index) for details. */ innodbAdaptiveHashIndex?: boolean; /** * Enables the NUMA interleave memory policy for allocation of the InnoDB buffer pool. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_numa_interleave) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_numa_interleave) for details. 
*/ innodbNumaInterleave?: boolean; /** * The size in bytes of the buffer that InnoDB uses to write to the log files on disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_buffer_size) for details. */ innodbLogBufferSize?: number; /** - * The size in bytes of the single Innodb Redo log file. + * The size in bytes of the single InnoDB Redo log file. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size) for details. */ innodbLogFileSize?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity) for details. */ innodbIoCapacity?: number; /** - * Limits IO available for InnoDB background tasks + * Limits IO available for InnoDB background tasks. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_io_capacity_max) for details. */ innodbIoCapacityMax?: number; /** * The number of I/O threads for read operations in InnoDB. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_read_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_read_io_threads) for details. */ innodbReadIoThreads?: number; /** * The number of I/O threads for write operations in InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_write_io_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_write_io_threads) for details. */ innodbWriteIoThreads?: number; /** * The number of background threads devoted to the InnoDB purge operation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_purge_threads) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_purge_threads) for details. */ innodbPurgeThreads?: number; /** * Defines the maximum number of threads permitted inside of InnoDB. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_thread_concurrency) for details. */ innodbThreadConcurrency?: number; /** - * Limits the max size of InnoDB temp tablespace + * Limits the max size of InnoDB temp tablespace. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_temp_data_file_path) for details. 
*/ innodbTempDataFileMaxSize?: number; /** * How many threads the server should cache for reuse. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_cache_size) for details. */ threadCacheSize?: number; /** * The stack size for each thread. The default is large enough for normal operation. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_stack). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_thread_stack) for details. */ threadStack?: number; /** * The minimum size of the buffer that is used for plain index scans, range index scans, and joins that do not use indexes and thus perform full table scans. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_join_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_join_buffer_size) for details. */ joinBufferSize?: number; /** * Each session that must perform a sort allocates a buffer of this size. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_sort_buffer_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_sort_buffer_size) for details. */ sortBufferSize?: number; /** * The number of table definitions that can be stored in the definition cache. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_definition_cache). 
+ * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_definition_cache) for details. */ tableDefinitionCache?: number; /** * The number of open tables for all threads. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache) for details. */ tableOpenCache?: number; /** * The number of open tables cache instances. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache_instances). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_table_open_cache_instances) for details. */ tableOpenCacheInstances?: number; /** - * This system variable determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. + * Determines whether the server enables certain nonstandard behaviors for default values and NULL-value handling in TIMESTAMP columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_explicit_defaults_for_timestamp) for details. */ explicitDefaultsForTimestamp?: boolean; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment) for details. 
*/ autoIncrementIncrement?: number; /** * Can be used to control the operation of AUTO_INCREMENT columns. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_offset). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_offset) for details. */ autoIncrementOffset?: number; /** * Controls how often the MySQL server synchronizes the binary log to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_sync_binlog). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_sync_binlog) for details. */ syncBinlog?: number; /** * The size of the cache to hold changes to the binary log during a transaction. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_cache_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_cache_size) for details. */ binlogCacheSize?: number; /** * Controls how many microseconds the binary log commit waits before synchronizing the binary log file to disk. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_group_commit_sync_delay) for details. */ binlogGroupCommitSyncDelay?: number; /** * For MySQL row-based replication, this variable determines how row images are written to the binary log. 
* - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_row_image). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_row_image) for details. */ binlogRowImage: Mysqlconfig80_BinlogRowImage; /** * When enabled, it causes the server to write informational log events such as row query log events into its binary log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_rows_query_log_events) for details. */ binlogRowsQueryLogEvents?: boolean; /** * The number of replica acknowledgments the source must receive per transaction before proceeding. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_rpl_semi_sync_master_wait_for_slave_count) for details. */ rplSemiSyncMasterWaitForSlaveCount?: number; /** - * When using a multithreaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. + * When using a multi-threaded replica, this variable specifies the policy used to decide which transactions are allowed to execute in parallel on the replica. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_type). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_type) for details. 
*/ slaveParallelType: Mysqlconfig80_SlaveParallelType; /** * Sets the number of applier threads for executing replication transactions in parallel. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_workers). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_slave_parallel_workers) for details. */ slaveParallelWorkers?: number; /** - * The time limit for regular expression matching operations performed by REGEXP_LIKE and similar functions + * The time limit for regular expression matching operations performed by REGEXP_LIKE and similar functions. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_regexp_time_limit). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html#sysvar_regexp_time_limit) for details. */ regexpTimeLimit?: number; /** The size of the binary log to hold. */ @@ -318,13 +318,13 @@ export interface Mysqlconfig80 { /** * The number of seconds the server waits for activity on an interactive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_interactive_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_interactive_timeout) for details. */ interactiveTimeout?: number; /** * The number of seconds the server waits for activity on a noninteractive connection before closing it. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_wait_timeout). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_wait_timeout) for details. 
*/ waitTimeout?: number; /** Replication lag threshold (seconds) which will switch MySQL to 'offline_mode = ON' to prevent users from reading stale data. */ @@ -337,45 +337,80 @@ export interface Mysqlconfig80 { /** * The limit on memory consumption for the range optimizer. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_range_optimizer_max_mem_size) for details. */ rangeOptimizerMaxMemSize?: number; /** - * Manages slow query log + * Manages slow query log. * - * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log). + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_slow_query_log) for details. */ slowQueryLog?: boolean; /** - * Query execution time, after which query to be logged unconditionally, that is, log_slow_rate_limit will not apply to it + * Query execution time, after which query to be logged unconditionally, that is, `log_slow_rate_limit` will not apply to it. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#slow_query_log_always_write_time) for details. */ slowQueryLogAlwaysWriteTime?: number; /** - * Specifies slow log granularity for log_slow_rate_limit: QUERY or SESSION + * Specifies slow log granularity for `log_slow_rate_limit` QUERY or SESSION value. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type). 
+ * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_type) for details. */ logSlowRateType: Mysqlconfig80_LogSlowRateType; /** * Specifies what fraction of session/query should be logged. Logging is enabled for every nth session/query. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_rate_limit) for details. */ logSlowRateLimit?: number; /** - * When TRUE, statements executed by stored procedures are logged to the slow log + * When TRUE, statements executed by stored procedures are logged to the slow log. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_sp_statements) for details. */ logSlowSpStatements?: boolean; /** - * Filters the slow log by the query's execution plan + * Filters the slow log by the query's execution plan. * - * For details, see [Percona documentation for the variable](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter). + * See [Percona documentation](https://www.percona.com/doc/percona-server/8.0/diagnostics/slow_extended.html#log_slow_filter) for details. */ logSlowFilter: Mysqlconfig80_LogSlowFilterType[]; + /** + * Replication lag threshold (seconds) which allows replica to be promoted to master while executing "switchover from". + * Should be less than mdb_offline_mode_disable_lag. + */ + mdbPriorityChoiceMaxLag?: number; + /** + * Specifies the page size for InnoDB tablespaces. 
+ * + * For details, see [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_page_size). + */ + innodbPageSize?: number; + /** + * The limit in bytes on the size of the temporary log files used during online DDL operations + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_online_alter_log_max_size) for details. + */ + innodbOnlineAlterLogMaxSize?: number; + /** + * Minimum length of words that are stored in an InnoDB FULLTEXT index + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_ft_min_token_size) for details. + */ + innodbFtMinTokenSize?: number; + /** + * Maximum length of words that are stored in an InnoDB FULLTEXT index + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_ft_max_token_size) for details. + */ + innodbFtMaxTokenSize?: number; + /** + * Table names storage and comparison strategy + * + * See [MySQL documentation for the variable](https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_lower_case_table_names) for details. + */ + lowerCaseTableNames?: number; } export enum Mysqlconfig80_SQLMode { @@ -1328,6 +1363,57 @@ export const Mysqlconfig80 = { writer.int32(v); } writer.ldelim(); + if (message.mdbPriorityChoiceMaxLag !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.mdbPriorityChoiceMaxLag!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.innodbPageSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.innodbPageSize! 
}, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.innodbOnlineAlterLogMaxSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbOnlineAlterLogMaxSize!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.innodbFtMinTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMinTokenSize!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.innodbFtMaxTokenSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.innodbFtMaxTokenSize!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.lowerCaseTableNames !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.lowerCaseTableNames!, + }, + writer.uint32(546).fork() + ).ldelim(); + } return writer; }, @@ -1687,6 +1773,42 @@ export const Mysqlconfig80 = { message.logSlowFilter.push(reader.int32() as any); } break; + case 63: + message.mdbPriorityChoiceMaxLag = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 64: + message.innodbPageSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.innodbOnlineAlterLogMaxSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.innodbFtMinTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.innodbFtMaxTokenSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.lowerCaseTableNames = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -1980,6 +2102,35 @@ export const Mysqlconfig80 = { message.logSlowFilter = (object.logSlowFilter ?? 
[]).map((e: any) => mysqlconfig80_LogSlowFilterTypeFromJSON(e) ); + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag !== undefined && + object.mdbPriorityChoiceMaxLag !== null + ? Number(object.mdbPriorityChoiceMaxLag) + : undefined; + message.innodbPageSize = + object.innodbPageSize !== undefined && object.innodbPageSize !== null + ? Number(object.innodbPageSize) + : undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize !== undefined && + object.innodbOnlineAlterLogMaxSize !== null + ? Number(object.innodbOnlineAlterLogMaxSize) + : undefined; + message.innodbFtMinTokenSize = + object.innodbFtMinTokenSize !== undefined && + object.innodbFtMinTokenSize !== null + ? Number(object.innodbFtMinTokenSize) + : undefined; + message.innodbFtMaxTokenSize = + object.innodbFtMaxTokenSize !== undefined && + object.innodbFtMaxTokenSize !== null + ? Number(object.innodbFtMaxTokenSize) + : undefined; + message.lowerCaseTableNames = + object.lowerCaseTableNames !== undefined && + object.lowerCaseTableNames !== null + ? 
Number(object.lowerCaseTableNames) + : undefined; return message; }, @@ -2125,6 +2276,18 @@ export const Mysqlconfig80 = { } else { obj.logSlowFilter = []; } + message.mdbPriorityChoiceMaxLag !== undefined && + (obj.mdbPriorityChoiceMaxLag = message.mdbPriorityChoiceMaxLag); + message.innodbPageSize !== undefined && + (obj.innodbPageSize = message.innodbPageSize); + message.innodbOnlineAlterLogMaxSize !== undefined && + (obj.innodbOnlineAlterLogMaxSize = message.innodbOnlineAlterLogMaxSize); + message.innodbFtMinTokenSize !== undefined && + (obj.innodbFtMinTokenSize = message.innodbFtMinTokenSize); + message.innodbFtMaxTokenSize !== undefined && + (obj.innodbFtMaxTokenSize = message.innodbFtMaxTokenSize); + message.lowerCaseTableNames !== undefined && + (obj.lowerCaseTableNames = message.lowerCaseTableNames); return obj; }, @@ -2209,6 +2372,14 @@ export const Mysqlconfig80 = { message.logSlowRateLimit = object.logSlowRateLimit ?? undefined; message.logSlowSpStatements = object.logSlowSpStatements ?? undefined; message.logSlowFilter = object.logSlowFilter?.map((e) => e) || []; + message.mdbPriorityChoiceMaxLag = + object.mdbPriorityChoiceMaxLag ?? undefined; + message.innodbPageSize = object.innodbPageSize ?? undefined; + message.innodbOnlineAlterLogMaxSize = + object.innodbOnlineAlterLogMaxSize ?? undefined; + message.innodbFtMinTokenSize = object.innodbFtMinTokenSize ?? undefined; + message.innodbFtMaxTokenSize = object.innodbFtMaxTokenSize ?? undefined; + message.lowerCaseTableNames = object.lowerCaseTableNames ?? undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/database.ts b/src/generated/yandex/cloud/mdb/mysql/v1/database.ts index fbeb4d29..a3d30c8f 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/database.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/database.ts @@ -6,20 +6,21 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; /** - * A MySQL database. 
For more information, see - * the [documentation](/docs/managed-mysql/concepts). + * An object that represents MySQL database. + * + * See [the documentation](/docs/managed-mysql/operations/databases) for details. */ export interface Database { $type: "yandex.cloud.mdb.mysql.v1.Database"; /** Name of the database. */ name: string; - /** ID of the MySQL cluster that the database belongs to. */ + /** ID of the cluster that the database belongs to. */ clusterId: string; } export interface DatabaseSpec { $type: "yandex.cloud.mdb.mysql.v1.DatabaseSpec"; - /** Name of the MySQL database. */ + /** Name of the database. */ name: string; } diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts index 34f0d622..c9af7fa0 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/database_service.ts @@ -25,13 +25,15 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetDatabaseRequest { $type: "yandex.cloud.mdb.mysql.v1.GetDatabaseRequest"; /** - * ID of the MySQL cluster that the database belongs to. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster that the database belongs to. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the MySQL database to return. - * To get the name of the database use a [DatabaseService.List] request. + * Name of the database to return information about. + * + * To get this name, make a [DatabaseService.List] request. */ databaseName: string; } @@ -39,32 +41,35 @@ export interface GetDatabaseRequest { export interface ListDatabasesRequest { $type: "yandex.cloud.mdb.mysql.v1.ListDatabasesRequest"; /** - * ID of the MySQL cluster to list databases in. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to list databases in. 
+ * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListDatabasesResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListDatabasesResponse.next_page_token] that can be used to get the next page of results in the subsequent [DatabaseService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, Set [page_token] to the [ListDatabasesResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListDatabasesResponse.next_page_token] returned by the previous [DatabaseService.List] request. */ pageToken: string; } export interface ListDatabasesResponse { $type: "yandex.cloud.mdb.mysql.v1.ListDatabasesResponse"; - /** List of MySQL databases. */ + /** List of databases. */ databases: Database[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value - * for the [ListDatabasesRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListDatabasesRequest.page_size], use the [next_page_token] as the value for the [ListDatabasesRequest.page_token] in the subsequent [DatabaseService.List] request to iterate through multiple pages of results. 
+ * + * Each of the subsequent [DatabaseService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -72,41 +77,44 @@ export interface ListDatabasesResponse { export interface CreateDatabaseRequest { $type: "yandex.cloud.mdb.mysql.v1.CreateDatabaseRequest"; /** - * ID of the MySQL cluster to create a database in. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to create the database in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Configuration of the database to create. */ + /** Configuration of the database. */ databaseSpec?: DatabaseSpec; } export interface CreateDatabaseMetadata { $type: "yandex.cloud.mdb.mysql.v1.CreateDatabaseMetadata"; - /** ID of the MySQL cluster where a database is being created. */ + /** ID of the cluster the database is being created in. */ clusterId: string; - /** Name of the MySQL database that is being created. */ + /** Name of the database that is being created. */ databaseName: string; } export interface DeleteDatabaseRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteDatabaseRequest"; /** - * ID of the MySQL cluster to delete a database in. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete the database from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** * Name of the database to delete. - * To get the name of the database, use a [DatabaseService.List] request. + * + * To get this name, make a [DatabaseService.List] request. */ databaseName: string; } export interface DeleteDatabaseMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteDatabaseMetadata"; - /** ID of the MySQL cluster where a database is being deleted. */ + /** ID of the cluster the database is being deleted from. */ clusterId: string; - /** Name of the MySQL database that is being deleted. 
*/ + /** Name of the database that is being deleted. */ databaseName: string; } @@ -686,13 +694,13 @@ export const DeleteDatabaseMetadata = { messageTypeRegistry.set(DeleteDatabaseMetadata.$type, DeleteDatabaseMetadata); -/** A set of methods for managing MySQL databases. */ +/** + * A set of methods for managing MySQL databases in a cluster. + * + * See [the documentation](/docs/managed-mysql/operations/databases) for details. + */ export const DatabaseServiceService = { - /** - * Returns the specified MySQL database. - * - * To get the list of available MySQL databases, make a [List] request. - */ + /** Retrieves information about the specified database. */ get: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/Get", requestStream: false, @@ -704,7 +712,7 @@ export const DatabaseServiceService = { Buffer.from(Database.encode(value).finish()), responseDeserialize: (value: Buffer) => Database.decode(value), }, - /** Retrieves the list of MySQL databases in the specified cluster. */ + /** Retrieves the list of databases in a cluster. */ list: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/List", requestStream: false, @@ -716,7 +724,7 @@ export const DatabaseServiceService = { Buffer.from(ListDatabasesResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListDatabasesResponse.decode(value), }, - /** Creates a new MySQL database in the specified cluster. */ + /** Creates a new database in a cluster. */ create: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/Create", requestStream: false, @@ -728,7 +736,7 @@ export const DatabaseServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified MySQL database. */ + /** Deletes a database from a cluster. 
*/ delete: { path: "/yandex.cloud.mdb.mysql.v1.DatabaseService/Delete", requestStream: false, @@ -743,26 +751,18 @@ export const DatabaseServiceService = { } as const; export interface DatabaseServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified MySQL database. - * - * To get the list of available MySQL databases, make a [List] request. - */ + /** Retrieves information about the specified database. */ get: handleUnaryCall; - /** Retrieves the list of MySQL databases in the specified cluster. */ + /** Retrieves the list of databases in a cluster. */ list: handleUnaryCall; - /** Creates a new MySQL database in the specified cluster. */ + /** Creates a new database in a cluster. */ create: handleUnaryCall; - /** Deletes the specified MySQL database. */ + /** Deletes a database from a cluster. */ delete: handleUnaryCall; } export interface DatabaseServiceClient extends Client { - /** - * Returns the specified MySQL database. - * - * To get the list of available MySQL databases, make a [List] request. - */ + /** Retrieves information about the specified database. */ get( request: GetDatabaseRequest, callback: (error: ServiceError | null, response: Database) => void @@ -778,7 +778,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Database) => void ): ClientUnaryCall; - /** Retrieves the list of MySQL databases in the specified cluster. */ + /** Retrieves the list of databases in a cluster. */ list( request: ListDatabasesRequest, callback: ( @@ -803,7 +803,7 @@ export interface DatabaseServiceClient extends Client { response: ListDatabasesResponse ) => void ): ClientUnaryCall; - /** Creates a new MySQL database in the specified cluster. */ + /** Creates a new database in a cluster. 
*/ create( request: CreateDatabaseRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -819,7 +819,7 @@ export interface DatabaseServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified MySQL database. */ + /** Deletes a database from a cluster. */ delete( request: DeleteDatabaseRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts b/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts index 157bc229..6be4bc39 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/maintenance.ts @@ -6,7 +6,7 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; -/** A maintenance window settings. */ +/** Configuration of a maintenance window in a MySQL cluster. */ export interface MaintenanceWindow { $type: "yandex.cloud.mdb.mysql.v1.MaintenanceWindow"; /** Maintenance operation can be scheduled anytime. */ diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts index f18b20d2..dfbbd118 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset.ts @@ -5,10 +5,15 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; -/** A preset of resources for hardware configuration of MySQL hosts. */ +/** + * An object that represents MySQL resource preset. + * A resource preset defines hardware configuration for cluster hosts. + * + * See [the documentation](/docs/managed-mysql/concepts/instance-types) for details. + */ export interface ResourcePreset { $type: "yandex.cloud.mdb.mysql.v1.ResourcePreset"; - /** ID of the resource preset. 
*/ + /** ID of the resource preset that defines available computational resources (vCPU, RAM, etc.) for a cluster host. */ id: string; /** IDs of availability zones where the resource preset is available. */ zoneIds: string[]; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts index d755fb2b..e6842533 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/resource_preset_service.ts @@ -21,8 +21,9 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetResourcePresetRequest { $type: "yandex.cloud.mdb.mysql.v1.GetResourcePresetRequest"; /** - * ID of the resource preset to return. - * To get the resource preset ID, use a [ResourcePresetService.List] request. + * ID of the resource preset to return information about. + * + * To get this ID, make a [ResourcePresetService.List] request. */ resourcePresetId: string; } @@ -30,14 +31,15 @@ export interface GetResourcePresetRequest { export interface ListResourcePresetsRequest { $type: "yandex.cloud.mdb.mysql.v1.ListResourcePresetsRequest"; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListResourcePresetsResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListResourcePresetsResponse.next_page_token] that can be used to get the next page of results in the subsequent [ResourcePresetService.List] requests. */ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] - * returned by a previous list request. 
+ * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListResourcePresetsResponse.next_page_token] returned by the previous [ResourcePresetService.List] request. */ pageToken: string; } @@ -47,10 +49,11 @@ export interface ListResourcePresetsResponse { /** List of resource presets. */ resourcePresets: ResourcePreset[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value - * for the [ListResourcePresetsRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListResourcePresetsRequest.page_size], use the [next_page_token] as the value for the [ListResourcePresetsRequest.page_token] in the subsequent [ResourcePresetService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [ResourcePresetService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -314,13 +317,13 @@ messageTypeRegistry.set( ListResourcePresetsResponse ); -/** A set of methods for managing resource presets. */ +/** + * A set of methods for managing MySQL resource presets. + * + * See [the documentation](/docs/managed-mysql/concepts/instance-types) for details. + */ export const ResourcePresetServiceService = { - /** - * Returns the specified resource preset. - * - * To get the list of available resource presets, make a [List] request. - */ + /** Retrieves information about a resource preset. 
*/ get: { path: "/yandex.cloud.mdb.mysql.v1.ResourcePresetService/Get", requestStream: false, @@ -351,11 +354,7 @@ export const ResourcePresetServiceService = { export interface ResourcePresetServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified resource preset. - * - * To get the list of available resource presets, make a [List] request. - */ + /** Retrieves information about a resource preset. */ get: handleUnaryCall; /** Retrieves the list of available resource presets. */ list: handleUnaryCall< @@ -365,11 +364,7 @@ export interface ResourcePresetServiceServer } export interface ResourcePresetServiceClient extends Client { - /** - * Returns the specified resource preset. - * - * To get the list of available resource presets, make a [List] request. - */ + /** Retrieves information about a resource preset. */ get( request: GetResourcePresetRequest, callback: (error: ServiceError | null, response: ResourcePreset) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/user.ts b/src/generated/yandex/cloud/mdb/mysql/v1/user.ts index 4710ac0a..54827507 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/user.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/user.ts @@ -8,18 +8,18 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export enum GlobalPermission { GLOBAL_PERMISSION_UNSPECIFIED = 0, - /** REPLICATION_CLIENT - Enables use of the SHOW MASTER STATUS, SHOW SLAVE STATUS, and SHOW BINARY LOGS statements. */ + /** REPLICATION_CLIENT - Enables use of the `SHOW MASTER STATUS`, `SHOW SLAVE STATUS`, and `SHOW BINARY LOGS` statements. */ REPLICATION_CLIENT = 1, /** * REPLICATION_SLAVE - Enables the account to request updates that have been made to databases on the master server, - * using the SHOW SLAVE HOSTS, SHOW RELAYLOG EVENTS, and SHOW BINLOG EVENTS statements. + * using the `SHOW SLAVE HOSTS`, `SHOW RELAYLOG EVENTS` and `SHOW BINLOG EVENTS` statements. 
*/ REPLICATION_SLAVE = 2, /** - * PROCESS - Enables display of information about the threads executing within the server - * (that is, information about the statements being executed by sessions). - * The privilege enables use of SHOW PROCESSLIST or mysqladmin processlist to see threads belonging - * to other accounts; you can always see your own threads. The PROCESS privilege also enables use of SHOW ENGINE. + * PROCESS - Enables display of information about the statements currently being performed by sessions (the set of threads executing within the server). + * + * The privilege enables use of `SHOW PROCESSLIST` or `mysqladmin` processlist to see threads belonging to other users. + * You can always see your own threads. The `PROCESS` privilege also enables use of `SHOW ENGINE`. */ PROCESS = 3, UNRECOGNIZED = -1, @@ -109,14 +109,15 @@ export function authPluginToJSON(object: AuthPlugin): string { } /** - * A MySQL user. For more information, see - * the [documentation](/docs/managed-mysql/concepts). + * An object that represents a MySQL user. + * + * See [the documentation](/docs/managed-mysql/operations/cluster-users) for details. + */ export interface User { $type: "yandex.cloud.mdb.mysql.v1.User"; - /** Name of the MySQL user. */ + /** Name of the user. */ name: string; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user belongs to. */ clusterId: string; /** Set of permissions granted to the user. */ permissions: Permission[]; @@ -132,7 +133,11 @@ export interface Permission { $type: "yandex.cloud.mdb.mysql.v1.Permission"; /** Name of the database that the permission grants access to. */ databaseName: string; - /** Roles granted to the user within the database. */ + /** + * Roles granted to the user within the database. + * + * See [the documentation](/docs/managed-mysql/operations/grant) for details. 
+ */ roles: Permission_Privilege[]; } @@ -142,7 +147,7 @@ export enum Permission_Privilege { ALL_PRIVILEGES = 1, /** ALTER - Altering tables. */ ALTER = 2, - /** ALTER_ROUTINE - Altering stored routines (stored procedures and functions). */ + /** ALTER_ROUTINE - Altering stored routines and functions. */ ALTER_ROUTINE = 3, /** CREATE - Creating tables or indexes. */ CREATE = 4, @@ -164,17 +169,17 @@ export enum Permission_Privilege { INDEX = 12, /** INSERT - Inserting rows into the database. */ INSERT = 13, - /** LOCK_TABLES - Using LOCK TABLES statement for tables available with SELECT privilege. */ + /** LOCK_TABLES - Using `LOCK TABLES` statement for tables available with `SELECT` privilege. */ LOCK_TABLES = 14, /** * SELECT - Selecting rows from tables. * - * Some SELECT statements can be allowed without the SELECT privilege. All - * statements that read column values require the SELECT privilege. See - * details in [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select). + * Some `SELECT` statements can be allowed without the `SELECT` privilege. All statements that read column values require the `SELECT` privilege. + * + * See [MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_select) for details. */ SELECT = 15, - /** SHOW_VIEW - Using the SHOW CREATE VIEW statement. Also needed for views used with EXPLAIN. */ + /** SHOW_VIEW - Using the `SHOW CREATE VIEW` statement. Also needed for views used with `EXPLAIN`. */ SHOW_VIEW = 16, /** TRIGGER - Creating, removing, executing, or displaying triggers for a table. */ TRIGGER = 17, @@ -319,11 +324,16 @@ export interface ConnectionLimits { export interface UserSpec { $type: "yandex.cloud.mdb.mysql.v1.UserSpec"; - /** Name of the MySQL user. */ + /** Name of the user. */ name: string; - /** Password of the MySQL user. */ + /** Password of the user. */ password: string; - /** Set of permissions to grant to the user. 
*/ + /** + * Set of permissions granted to the user to access specific databases. + * One permission per database. + * + * When a permission for a database is set, the user will have access to the database. + */ permissions: Permission[]; /** Set of global permissions to grant to the user. */ globalPermissions: GlobalPermission[]; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts index a2f93715..435312c7 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1/user_service.ts @@ -33,41 +33,52 @@ export const protobufPackage = "yandex.cloud.mdb.mysql.v1"; export interface GetUserRequest { $type: "yandex.cloud.mdb.mysql.v1.GetUserRequest"; - /** ID of the MySQL cluster. */ + /** + * ID of the cluster the user belongs to. + * + * To get this ID, make a [ClusterService.List] request. + */ clusterId: string; - /** Required. */ + /** + * Name of the user to return information about. + * + * To get this name, make a [UserService.List] request. + */ userName: string; } export interface ListUsersRequest { $type: "yandex.cloud.mdb.mysql.v1.ListUsersRequest"; /** - * ID of the cluster to list MySQL users in. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to list the users in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. + * The maximum number of results per page to return. + * + * If the number of available results is larger than [page_size], the API returns a [ListUsersResponse.next_page_token] that can be used to get the next page of results in the subsequent [UserService.List] requests. 
*/ pageSize: number; /** - * Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] - * returned by a previous list request. + * Page token that can be used to iterate through multiple pages of results. + * + * To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by the previous [UserService.List] request. */ pageToken: string; } export interface ListUsersResponse { $type: "yandex.cloud.mdb.mysql.v1.ListUsersResponse"; - /** Requested list of MySQL users. */ + /** List of users. */ users: User[]; /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value - * for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. + * The token that can be used to get the next page of results. + * + * If the number of results is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value for the [ListUsersRequest.page_token] in the subsequent [UserService.List] request to iterate through multiple pages of results. + * + * Each of the subsequent [UserService.List] requests should use the [next_page_token] value returned by the previous request to continue paging through the results. */ nextPageToken: string; } @@ -75,17 +86,18 @@ export interface ListUsersResponse { export interface CreateUserRequest { $type: "yandex.cloud.mdb.mysql.v1.CreateUserRequest"; /** - * ID of the MySQL cluster to create a user for. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to create the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; - /** Properties of the user to be created. */ + /** Configuration of the user. 
*/ userSpec?: UserSpec; } export interface CreateUserMetadata { $type: "yandex.cloud.mdb.mysql.v1.CreateUserMetadata"; - /** ID of the MySQL cluster the user is being created for. */ + /** ID of the cluster the user is being created in. */ clusterId: string; /** Name of the user that is being created. */ userName: string; @@ -94,20 +106,22 @@ export interface CreateUserMetadata { export interface UpdateUserRequest { $type: "yandex.cloud.mdb.mysql.v1.UpdateUserRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID use a [ClusterService.List] request. + * ID of the cluster to update the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the user to be updated. - * To get the name of the user use a [UserService.List] request. + * Name of the user to update. + * + * To get this name, make a [UserService.List] request. */ userName: string; - /** Field mask that specifies which fields of the MySQL user should be updated. */ + /** Field mask that specifies which settings of the user should be updated. */ updateMask?: FieldMask; /** New password for the user. */ password: string; - /** New set of permissions for the user. */ + /** A new set of permissions that should be granted to the user. */ permissions: Permission[]; /** New set of global permissions to grant to the user. */ globalPermissions: GlobalPermission[]; @@ -119,7 +133,7 @@ export interface UpdateUserRequest { export interface UpdateUserMetadata { $type: "yandex.cloud.mdb.mysql.v1.UpdateUserMetadata"; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user is being updated in. */ clusterId: string; /** Name of the user that is being updated. */ userName: string; @@ -128,20 +142,22 @@ export interface UpdateUserMetadata { export interface DeleteUserRequest { $type: "yandex.cloud.mdb.mysql.v1.DeleteUserRequest"; /** - * ID of the MySQL cluster the user belongs to. 
- * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to delete the user from. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** * Name of the user to delete. - * To get the name of the user, use a [UserService.List] request. + * + * To get this name, make a [UserService.List] request. */ userName: string; } export interface DeleteUserMetadata { $type: "yandex.cloud.mdb.mysql.v1.DeleteUserMetadata"; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user is being deleted from. */ clusterId: string; /** Name of the user that is being deleted. */ userName: string; @@ -150,13 +166,15 @@ export interface DeleteUserMetadata { export interface GrantUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1.GrantUserPermissionRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to grant permission to the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the user to grant the permission to. - * To get the name of the user, use a [UserService.List] request. + * Name of the user to grant permission to. + * + * To get this name, make a [UserService.List] request. */ userName: string; /** Permission that should be granted to the specified user. */ @@ -165,10 +183,7 @@ export interface GrantUserPermissionRequest { export interface GrantUserPermissionMetadata { $type: "yandex.cloud.mdb.mysql.v1.GrantUserPermissionMetadata"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the cluster the user is being granted a permission in. */ clusterId: string; /** Name of the user that is being granted a permission. 
*/ userName: string; @@ -177,22 +192,24 @@ export interface GrantUserPermissionMetadata { export interface RevokeUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1.RevokeUserPermissionRequest"; /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. + * ID of the cluster to revoke permission from the user in. + * + * To get this ID, make a [ClusterService.List] request. */ clusterId: string; /** - * Name of the user to revoke a permission from. - * To get the name of the user, use a [UserService.List] request. + * Name of the user to revoke permission from. + * + * To get this name, make a [UserService.List] request. */ userName: string; - /** Permission that should be revoked from the specified user. */ + /** Permission that should be revoked from the user. */ permission?: Permission; } export interface RevokeUserPermissionMetadata { $type: "yandex.cloud.mdb.mysql.v1.RevokeUserPermissionMetadata"; - /** ID of the MySQL cluster the user belongs to. */ + /** ID of the cluster the user is being revoked a permission in. */ clusterId: string; /** Name of the user whose permission is being revoked. */ userName: string; @@ -1397,13 +1414,13 @@ messageTypeRegistry.set( RevokeUserPermissionMetadata ); -/** A set of methods for managing MySQL users. */ +/** + * A set of methods for managing MySQL users. + * + * See [the documentation](/docs/managed-mysql/operations/cluster-users) for details. + */ export const UserServiceService = { - /** - * Returns the specified MySQL user. - * - * To get the list of available MySQL users, make a [List] request. - */ + /** Retrieves information about the specified user. 
*/ get: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Get", requestStream: false, @@ -1415,7 +1432,7 @@ export const UserServiceService = { Buffer.from(User.encode(value).finish()), responseDeserialize: (value: Buffer) => User.decode(value), }, - /** Retrieves a list of MySQL users in the specified cluster. */ + /** Retrieves the list of users in a cluster. */ list: { path: "/yandex.cloud.mdb.mysql.v1.UserService/List", requestStream: false, @@ -1427,7 +1444,7 @@ export const UserServiceService = { Buffer.from(ListUsersResponse.encode(value).finish()), responseDeserialize: (value: Buffer) => ListUsersResponse.decode(value), }, - /** Creates a MySQL user in the specified cluster. */ + /** Creates a user in a cluster. */ create: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Create", requestStream: false, @@ -1439,7 +1456,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Modifies the specified MySQL user. */ + /** Updates a user in a cluster. */ update: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Update", requestStream: false, @@ -1451,7 +1468,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Deletes the specified MySQL user. */ + /** Deletes a user in a cluster. */ delete: { path: "/yandex.cloud.mdb.mysql.v1.UserService/Delete", requestStream: false, @@ -1463,7 +1480,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Grants a permission to the specified MySQL user. */ + /** Grants permission to access a database to a user in a cluster. 
*/ grantPermission: { path: "/yandex.cloud.mdb.mysql.v1.UserService/GrantPermission", requestStream: false, @@ -1476,7 +1493,7 @@ export const UserServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Revokes a permission from the specified MySQL user. */ + /** Revokes permission to access a database from a user in a cluster. */ revokePermission: { path: "/yandex.cloud.mdb.mysql.v1.UserService/RevokePermission", requestStream: false, @@ -1492,32 +1509,24 @@ export const UserServiceService = { } as const; export interface UserServiceServer extends UntypedServiceImplementation { - /** - * Returns the specified MySQL user. - * - * To get the list of available MySQL users, make a [List] request. - */ + /** Retrieves information about the specified user. */ get: handleUnaryCall; - /** Retrieves a list of MySQL users in the specified cluster. */ + /** Retrieves the list of users in a cluster. */ list: handleUnaryCall; - /** Creates a MySQL user in the specified cluster. */ + /** Creates a user in a cluster. */ create: handleUnaryCall; - /** Modifies the specified MySQL user. */ + /** Updates a user in a cluster. */ update: handleUnaryCall; - /** Deletes the specified MySQL user. */ + /** Deletes a user in a cluster. */ delete: handleUnaryCall; - /** Grants a permission to the specified MySQL user. */ + /** Grants permission to access a database to a user in a cluster. */ grantPermission: handleUnaryCall; - /** Revokes a permission from the specified MySQL user. */ + /** Revokes permission to access a database from a user in a cluster. */ revokePermission: handleUnaryCall; } export interface UserServiceClient extends Client { - /** - * Returns the specified MySQL user. - * - * To get the list of available MySQL users, make a [List] request. - */ + /** Retrieves information about the specified user. 
*/ get( request: GetUserRequest, callback: (error: ServiceError | null, response: User) => void @@ -1533,7 +1542,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: User) => void ): ClientUnaryCall; - /** Retrieves a list of MySQL users in the specified cluster. */ + /** Retrieves the list of users in a cluster. */ list( request: ListUsersRequest, callback: (error: ServiceError | null, response: ListUsersResponse) => void @@ -1549,7 +1558,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: ListUsersResponse) => void ): ClientUnaryCall; - /** Creates a MySQL user in the specified cluster. */ + /** Creates a user in a cluster. */ create( request: CreateUserRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1565,7 +1574,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Modifies the specified MySQL user. */ + /** Updates a user in a cluster. */ update( request: UpdateUserRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1581,7 +1590,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Deletes the specified MySQL user. */ + /** Deletes a user in a cluster. */ delete( request: DeleteUserRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1597,7 +1606,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Grants a permission to the specified MySQL user. */ + /** Grants permission to access a database to a user in a cluster. 
*/ grantPermission( request: GrantUserPermissionRequest, callback: (error: ServiceError | null, response: Operation) => void @@ -1613,7 +1622,7 @@ export interface UserServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Revokes a permission from the specified MySQL user. */ + /** Revokes permission to access a database from a user in a cluster. */ revokePermission( request: RevokeUserPermissionRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts index b6ba7546..1e895864 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster.ts @@ -253,7 +253,7 @@ export interface Host { * Name of the MySQL host. The host name is assigned by Managed Service for MySQL * at creation time, and cannot be changed. 1-63 characters long. * - * The name is unique across all existing database hosts in Yandex.Cloud, + * The name is unique across all existing database hosts in Yandex Cloud, * as it defines the FQDN of the host. */ name: string; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts index 9f87696a..a6956fc3 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/cluster_service.ts @@ -129,7 +129,7 @@ export interface UpdateClusterRequest { * To get the MySQL cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** Field mask that specifies which fields of the MySQL cluster should be updated. */ + /** Field mask that specifies which settings of the MySQL cluster should be updated. */ updateMask?: FieldMask; /** New description of the MySQL cluster. 
*/ description: string; diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts index 8032d8fc..3f807261 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/database.ts @@ -5,10 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1alpha"; -/** - * A MySQL database. For more information, see - * the [documentation](/docs/managed-mysql/concepts). - */ +/** A MySQL database. For more information, see the [documentation](/docs/managed-mysql/concepts). */ export interface Database { $type: "yandex.cloud.mdb.mysql.v1alpha.Database"; /** Name of the database. */ diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts index c1f01574..f78b5020 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user.ts @@ -5,10 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.mdb.mysql.v1alpha"; -/** - * A MySQL user. For more information, see - * the [documentation](/docs/managed-mysql/concepts). - */ +/** A MySQL user. For more information, see the [documentation](/docs/managed-mysql/concepts). */ export interface User { $type: "yandex.cloud.mdb.mysql.v1alpha.User"; /** Name of the MySQL user. */ @@ -60,9 +57,7 @@ export enum Permission_Privilege { /** * SELECT - Selecting rows from tables. * - * Some SELECT statements can be allowed without the SELECT privilege. All - * statements that read column values require the SELECT privilege. See - * details in [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select). + * Some SELECT statements can be allowed without the SELECT privilege. All statements that read column values require the SELECT privilege. 
See details in [MySQL documentation](https://dev.mysql.com/doc/refman/5.7/en/privileges-provided.html#priv_select). */ SELECT = 15, /** SHOW_VIEW - Using the SHOW CREATE VIEW statement. Also needed for views used with EXPLAIN. */ diff --git a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts index 0f5cba20..b47e33b9 100644 --- a/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts +++ b/src/generated/yandex/cloud/mdb/mysql/v1alpha/user_service.ts @@ -34,21 +34,11 @@ export interface GetUserRequest { export interface ListUsersRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.ListUsersRequest"; - /** - * ID of the cluster to list MySQL users in. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the cluster to list MySQL users in. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * The maximum number of results per page to return. If the number of available - * results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] - * that can be used to get the next page of results in subsequent list requests. - */ + /** The maximum number of results per page to return. If the number of available results is larger than [page_size], the service returns a [ListUsersResponse.next_page_token] that can be used to get the next page of results in subsequent list requests. */ pageSize: number; - /** - * Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] - * returned by a previous list request. - */ + /** Page token. To get the next page of results, set [page_token] to the [ListUsersResponse.next_page_token] returned by a previous list request. */ pageToken: string; } @@ -56,21 +46,13 @@ export interface ListUsersResponse { $type: "yandex.cloud.mdb.mysql.v1alpha.ListUsersResponse"; /** Requested list of MySQL users. 
*/ users: User[]; - /** - * This token allows you to get the next page of results for list requests. If the number of results - * is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value - * for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent - * list request will have its own [next_page_token] to continue paging through the results. - */ + /** This token allows you to get the next page of results for list requests. If the number of results is larger than [ListUsersRequest.page_size], use the [next_page_token] as the value for the [ListUsersRequest.page_token] parameter in the next list request. Each subsequent list request will have its own [next_page_token] to continue paging through the results. */ nextPageToken: string; } export interface CreateUserRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.CreateUserRequest"; - /** - * ID of the MySQL cluster to create a user for. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster to create a user for. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; /** Properties of the user to be created. */ userSpec?: UserSpec; @@ -86,17 +68,11 @@ export interface CreateUserMetadata { export interface UpdateUserRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.UpdateUserRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to be updated. - * To get the name of the user use a [UserService.List] request. - */ + /** Name of the user to be updated. To get the name of the user, use a [UserService.List] request. */ userName: string; - /** Field mask that specifies which fields of the MySQL user should be updated. 
*/ + /** Field mask that specifies which settings of the MySQL user should be updated. */ updateMask?: FieldMask; /** New password for the user. */ password: string; @@ -108,21 +84,15 @@ export interface UpdateUserMetadata { $type: "yandex.cloud.mdb.mysql.v1alpha.UpdateUserMetadata"; /** ID of the MySQL cluster the user belongs to. */ clusterId: string; - /** Name of the user that is being updated. */ + /** Name of a user that is being updated. */ userName: string; } export interface DeleteUserRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.DeleteUserRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to delete. - * To get the name of the user, use a [UserService.List] request. - */ + /** Name of the user to delete. To get the name of the user, use a [UserService.List] request. */ userName: string; } @@ -136,15 +106,9 @@ export interface DeleteUserMetadata { export interface GrantUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.GrantUserPermissionRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to grant the permission to. - * To get the name of the user, use a [UserService.List] request. - */ + /** Name of the user to grant the permission to. To get the name of the user, use a [UserService.List] request. */ userName: string; /** Permission that should be granted to the specified user. 
*/ permission?: Permission; @@ -152,10 +116,7 @@ export interface GrantUserPermissionRequest { export interface GrantUserPermissionMetadata { $type: "yandex.cloud.mdb.mysql.v1alpha.GrantUserPermissionMetadata"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; /** Name of the user that is being granted a permission. */ userName: string; @@ -163,15 +124,9 @@ export interface GrantUserPermissionMetadata { export interface RevokeUserPermissionRequest { $type: "yandex.cloud.mdb.mysql.v1alpha.RevokeUserPermissionRequest"; - /** - * ID of the MySQL cluster the user belongs to. - * To get the cluster ID, use a [ClusterService.List] request. - */ + /** ID of the MySQL cluster the user belongs to. To get the cluster ID, use a [ClusterService.List] request. */ clusterId: string; - /** - * Name of the user to revoke a permission from. - * To get the name of the user, use a [UserService.List] request. - */ + /** Name of the user to revoke a permission from. To get the name of the user, use a [UserService.List] request. */ userName: string; /** Name of the database that the user should lose access to. 
*/ databaseName: string; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts index b1d29d7f..93be9eff 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster.ts @@ -16,6 +16,7 @@ import { Postgresqlconfigset111c } from "../../../../../yandex/cloud/mdb/postgre import { PostgresqlConfigSet12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12"; import { Postgresqlconfigset121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c"; import { PostgresqlConfigSet13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13"; +import { PostgresqlConfigSet14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -24,6 +25,7 @@ import { Postgresqlhostconfig111c } from "../../../../../yandex/cloud/mdb/postgr import { PostgresqlHostConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12"; import { Postgresqlhostconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12_1c"; import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13"; +import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -284,6 +286,8 @@ export interface ClusterConfig { postgresqlConfig121c?: Postgresqlconfigset121c | undefined; /** Configuration of a PostgreSQL 13 server. 
*/ postgresqlConfig13?: PostgresqlConfigSet13 | undefined; + /** Configuration of a PostgreSQL 14 server. */ + postgresqlConfig14?: PostgresqlConfigSet14 | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -368,7 +372,7 @@ export interface Host { * Name of the PostgreSQL host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the PostgreSQL host. The ID is assigned by MDB at creation time. */ @@ -555,6 +559,8 @@ export interface HostConfig { postgresqlConfig121c?: Postgresqlhostconfig121c | undefined; /** Configuration for a host with PostgreSQL 13 server deployed. */ postgresqlConfig13?: PostgresqlHostConfig13 | undefined; + /** Configuration for a host with PostgreSQL 14 server deployed. */ + postgresqlConfig14?: PostgresqlHostConfig14 | undefined; } export interface Service { @@ -670,13 +676,15 @@ export interface Access { /** Allow access for DataLens */ dataLens: boolean; /** - * Allow SQL queries to the cluster databases from the Yandex.Cloud management console. + * Allow SQL queries to the cluster databases from the Yandex Cloud management console. * * See [SQL queries in the management console](/docs/managed-postgresql/operations/web-sql-query) for more details. */ webSql: boolean; /** Allow access for Serverless */ serverless: boolean; + /** Allow access for DataTransfer. 
*/ + dataTransfer: boolean; } export interface PerformanceDiagnostics { @@ -1241,6 +1249,12 @@ export const ClusterConfig = { writer.uint32(122).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlConfigSet14.encode( + message.postgresqlConfig14, + writer.uint32(130).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -1341,6 +1355,12 @@ export const ClusterConfig = { reader.uint32() ); break; + case 16: + message.postgresqlConfig14 = PostgresqlConfigSet14.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -1428,6 +1448,11 @@ export const ClusterConfig = { object.postgresqlConfig_13 !== null ? PostgresqlConfigSet13.fromJSON(object.postgresqlConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig_14 !== undefined && + object.postgresqlConfig_14 !== null + ? PostgresqlConfigSet14.fromJSON(object.postgresqlConfig_14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -1497,6 +1522,10 @@ export const ClusterConfig = { (obj.postgresqlConfig_13 = message.postgresqlConfig13 ? PostgresqlConfigSet13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlConfig_14 = message.postgresqlConfig14 + ? PostgresqlConfigSet14.toJSON(message.postgresqlConfig14) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -1567,6 +1596,11 @@ export const ClusterConfig = { object.postgresqlConfig13 !== null ? PostgresqlConfigSet13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? 
PostgresqlConfigSet14.fromPartial(object.postgresqlConfig14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -1976,6 +2010,12 @@ export const HostConfig = { writer.uint32(66).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlHostConfig14.encode( + message.postgresqlConfig14, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -2034,6 +2074,12 @@ export const HostConfig = { reader.uint32() ); break; + case 9: + message.postgresqlConfig14 = PostgresqlHostConfig14.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -2084,6 +2130,11 @@ export const HostConfig = { object.postgresqlHostConfig_13 !== null ? PostgresqlHostConfig13.fromJSON(object.postgresqlHostConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlHostConfig_14 !== undefined && + object.postgresqlHostConfig_14 !== null + ? PostgresqlHostConfig14.fromJSON(object.postgresqlHostConfig_14) + : undefined; return message; }, @@ -2121,6 +2172,10 @@ export const HostConfig = { (obj.postgresqlHostConfig_13 = message.postgresqlConfig13 ? PostgresqlHostConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlHostConfig_14 = message.postgresqlConfig14 + ? PostgresqlHostConfig14.toJSON(message.postgresqlConfig14) + : undefined); return obj; }, @@ -2168,6 +2223,11 @@ export const HostConfig = { object.postgresqlConfig13 !== null ? PostgresqlHostConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? 
PostgresqlHostConfig14.fromPartial(object.postgresqlConfig14) + : undefined; return message; }, }; @@ -2343,6 +2403,7 @@ const baseAccess: object = { dataLens: false, webSql: false, serverless: false, + dataTransfer: false, }; export const Access = { @@ -2361,6 +2422,9 @@ export const Access = { if (message.serverless === true) { writer.uint32(24).bool(message.serverless); } + if (message.dataTransfer === true) { + writer.uint32(32).bool(message.dataTransfer); + } return writer; }, @@ -2380,6 +2444,9 @@ export const Access = { case 3: message.serverless = reader.bool(); break; + case 4: + message.dataTransfer = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -2402,6 +2469,10 @@ export const Access = { object.serverless !== undefined && object.serverless !== null ? Boolean(object.serverless) : false; + message.dataTransfer = + object.dataTransfer !== undefined && object.dataTransfer !== null + ? Boolean(object.dataTransfer) + : false; return message; }, @@ -2410,6 +2481,8 @@ export const Access = { message.dataLens !== undefined && (obj.dataLens = message.dataLens); message.webSql !== undefined && (obj.webSql = message.webSql); message.serverless !== undefined && (obj.serverless = message.serverless); + message.dataTransfer !== undefined && + (obj.dataTransfer = message.dataTransfer); return obj; }, @@ -2418,6 +2491,7 @@ export const Access = { message.dataLens = object.dataLens ?? false; message.webSql = object.webSql ?? false; message.serverless = object.serverless ?? false; + message.dataTransfer = object.dataTransfer ?? 
false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts index 41132d64..886f947b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/cluster_service.ts @@ -43,6 +43,7 @@ import { Postgresqlconfig111c } from "../../../../../yandex/cloud/mdb/postgresql import { PostgresqlConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12"; import { Postgresqlconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c"; import { PostgresqlConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql13"; +import { PostgresqlConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/postgresql14"; import { Postgresqlhostconfig96 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host9_6"; import { Postgresqlhostconfig101c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10_1c"; import { PostgresqlHostConfig10 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host10"; @@ -51,6 +52,7 @@ import { Postgresqlhostconfig111c } from "../../../../../yandex/cloud/mdb/postgr import { PostgresqlHostConfig12 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12"; import { Postgresqlhostconfig121c } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host12_1c"; import { PostgresqlHostConfig13 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host13"; +import { PostgresqlHostConfig14 } from "../../../../../yandex/cloud/mdb/postgresql/v1/config/host14"; import { Int64Value, BoolValue } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.postgresql.v1"; @@ -841,8 +843,10 @@ export interface ConfigSpec { postgresqlConfig12?: PostgresqlConfig12 | undefined; /** Configuration for a PostgreSQL 12 1C cluster. 
*/ postgresqlConfig121c?: Postgresqlconfig121c | undefined; - /** Configuration for a PostgreSQL 13 1C cluster. */ + /** Configuration for a PostgreSQL 13 cluster. */ postgresqlConfig13?: PostgresqlConfig13 | undefined; + /** Configuration for a PostgreSQL 14 cluster. */ + postgresqlConfig14?: PostgresqlConfig14 | undefined; /** Configuration of the connection pooler. */ poolerConfig?: ConnectionPoolerConfig; /** Resources allocated to PostgreSQL hosts. */ @@ -877,6 +881,8 @@ export interface ConfigHostSpec { postgresqlConfig121c?: Postgresqlhostconfig121c | undefined; /** Configuration for a host with PostgreSQL 13 server deployed. */ postgresqlConfig13?: PostgresqlHostConfig13 | undefined; + /** Configuration for a host with PostgreSQL 14 server deployed. */ + postgresqlConfig14?: PostgresqlHostConfig14 | undefined; } const baseGetClusterRequest: object = { @@ -5537,6 +5543,12 @@ export const ConfigSpec = { writer.uint32(122).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlConfig14.encode( + message.postgresqlConfig14, + writer.uint32(130).fork() + ).ldelim(); + } if (message.poolerConfig !== undefined) { ConnectionPoolerConfig.encode( message.poolerConfig, @@ -5637,6 +5649,12 @@ export const ConfigSpec = { reader.uint32() ); break; + case 16: + message.postgresqlConfig14 = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; case 4: message.poolerConfig = ConnectionPoolerConfig.decode( reader, @@ -5724,6 +5742,11 @@ export const ConfigSpec = { object.postgresqlConfig_13 !== null ? PostgresqlConfig13.fromJSON(object.postgresqlConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig_14 !== undefined && + object.postgresqlConfig_14 !== null + ? PostgresqlConfig14.fromJSON(object.postgresqlConfig_14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? 
ConnectionPoolerConfig.fromJSON(object.poolerConfig) @@ -5793,6 +5816,10 @@ export const ConfigSpec = { (obj.postgresqlConfig_13 = message.postgresqlConfig13 ? PostgresqlConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlConfig_14 = message.postgresqlConfig14 + ? PostgresqlConfig14.toJSON(message.postgresqlConfig14) + : undefined); message.poolerConfig !== undefined && (obj.poolerConfig = message.poolerConfig ? ConnectionPoolerConfig.toJSON(message.poolerConfig) @@ -5863,6 +5890,11 @@ export const ConfigSpec = { object.postgresqlConfig13 !== null ? PostgresqlConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? PostgresqlConfig14.fromPartial(object.postgresqlConfig14) + : undefined; message.poolerConfig = object.poolerConfig !== undefined && object.poolerConfig !== null ? ConnectionPoolerConfig.fromPartial(object.poolerConfig) @@ -5952,6 +5984,12 @@ export const ConfigHostSpec = { writer.uint32(66).fork() ).ldelim(); } + if (message.postgresqlConfig14 !== undefined) { + PostgresqlHostConfig14.encode( + message.postgresqlConfig14, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -6010,6 +6048,12 @@ export const ConfigHostSpec = { reader.uint32() ); break; + case 9: + message.postgresqlConfig14 = PostgresqlHostConfig14.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -6060,6 +6104,11 @@ export const ConfigHostSpec = { object.postgresqlHostConfig_13 !== null ? PostgresqlHostConfig13.fromJSON(object.postgresqlHostConfig_13) : undefined; + message.postgresqlConfig14 = + object.postgresqlHostConfig_14 !== undefined && + object.postgresqlHostConfig_14 !== null + ? 
PostgresqlHostConfig14.fromJSON(object.postgresqlHostConfig_14) + : undefined; return message; }, @@ -6097,6 +6146,10 @@ export const ConfigHostSpec = { (obj.postgresqlHostConfig_13 = message.postgresqlConfig13 ? PostgresqlHostConfig13.toJSON(message.postgresqlConfig13) : undefined); + message.postgresqlConfig14 !== undefined && + (obj.postgresqlHostConfig_14 = message.postgresqlConfig14 + ? PostgresqlHostConfig14.toJSON(message.postgresqlConfig14) + : undefined); return obj; }, @@ -6144,6 +6197,11 @@ export const ConfigHostSpec = { object.postgresqlConfig13 !== null ? PostgresqlHostConfig13.fromPartial(object.postgresqlConfig13) : undefined; + message.postgresqlConfig14 = + object.postgresqlConfig14 !== undefined && + object.postgresqlConfig14 !== null + ? PostgresqlHostConfig14.fromPartial(object.postgresqlConfig14) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts new file mode 100644 index 00000000..72f96036 --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/host14.ts @@ -0,0 +1,2028 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface PostgresqlHostConfig14 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14"; + /** in milliseconds. */ + recoveryMinApplyDelay?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. 
*/ + tempBuffers?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + tempFileLimit?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + constraintExclusion: PostgresqlHostConfig14_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: PostgresqlHostConfig14_ForceParallelMode; + clientMinMessages: PostgresqlHostConfig14_LogLevel; + logMinMessages: PostgresqlHostConfig14_LogLevel; + logMinErrorStatement: PostgresqlHostConfig14_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlHostConfig14_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlHostConfig14_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlHostConfig14_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlHostConfig14_ByteaOutput; + xmlbinary: PostgresqlHostConfig14_XmlBinary; + xmloption: PostgresqlHostConfig14_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. 
*/ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlHostConfig14_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + timezone: string; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; +} + +export enum PostgresqlHostConfig14_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_ConstraintExclusionFromJSON( + object: any +): PostgresqlHostConfig14_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function 
postgresqlHostConfig14_ConstraintExclusionToJSON( + object: PostgresqlHostConfig14_ConstraintExclusion +): string { + switch (object) { + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlHostConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_ForceParallelModeFromJSON( + object: any +): PostgresqlHostConfig14_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_ForceParallelModeToJSON( + object: PostgresqlHostConfig14_ForceParallelMode +): string { + switch (object) { + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return 
"FORCE_PARALLEL_MODE_ON"; + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case PostgresqlHostConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogLevelFromJSON( + object: any +): PostgresqlHostConfig14_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlHostConfig14_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return 
PostgresqlHostConfig14_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_LogLevelToJSON( + object: PostgresqlHostConfig14_LogLevel +): string { + switch (object) { + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_ERROR: + return "LOG_LEVEL_ERROR"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case PostgresqlHostConfig14_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogErrorVerbosityFromJSON( + object: any +): PostgresqlHostConfig14_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return 
PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlHostConfig14_LogErrorVerbosityToJSON( + object: PostgresqlHostConfig14_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlHostConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlHostConfig14_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlHostConfig14_LogStatementFromJSON( + object: any +): PostgresqlHostConfig14_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlHostConfig14_LogStatement.UNRECOGNIZED; + } +} + +export function 
postgresqlHostConfig14_LogStatementToJSON(
  object: PostgresqlHostConfig14_LogStatement
): string {
  // Serializes a log_statement enum value to its proto name string.
  // Any value without a case (including UNRECOGNIZED) becomes "UNKNOWN".
  switch (object) {
    case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED:
      return "LOG_STATEMENT_UNSPECIFIED";
    case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_NONE:
      return "LOG_STATEMENT_NONE";
    case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_DDL:
      return "LOG_STATEMENT_DDL";
    case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_MOD:
      return "LOG_STATEMENT_MOD";
    case PostgresqlHostConfig14_LogStatement.LOG_STATEMENT_ALL:
      return "LOG_STATEMENT_ALL";
    default:
      return "UNKNOWN";
  }
}

// NOTE(review): the enums and converters below follow the ts-proto generated
// pattern (numeric proto enums plus paired FromJSON/ToJSON helpers).
// Presumably this file is regenerated from the .proto sources — confirm
// before editing by hand.

// Proto enum: transaction isolation levels for PostgresqlHostConfig14.
// UNRECOGNIZED (-1) marks wire values unknown to this client version.
export enum PostgresqlHostConfig14_TransactionIsolation {
  TRANSACTION_ISOLATION_UNSPECIFIED = 0,
  TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1,
  TRANSACTION_ISOLATION_READ_COMMITTED = 2,
  TRANSACTION_ISOLATION_REPEATABLE_READ = 3,
  TRANSACTION_ISOLATION_SERIALIZABLE = 4,
  UNRECOGNIZED = -1,
}

// Parses a JSON value (numeric wire value or proto name string) into the
// enum; anything else maps to UNRECOGNIZED.
export function postgresqlHostConfig14_TransactionIsolationFromJSON(
  object: any
): PostgresqlHostConfig14_TransactionIsolation {
  switch (object) {
    case 0:
    case "TRANSACTION_ISOLATION_UNSPECIFIED":
      return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED;
    case 1:
    case "TRANSACTION_ISOLATION_READ_UNCOMMITTED":
      return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED;
    case 2:
    case "TRANSACTION_ISOLATION_READ_COMMITTED":
      return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED;
    case 3:
    case "TRANSACTION_ISOLATION_REPEATABLE_READ":
      return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ;
    case 4:
    case "TRANSACTION_ISOLATION_SERIALIZABLE":
      return PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE;
    case -1:
    case "UNRECOGNIZED":
    default:
      return PostgresqlHostConfig14_TransactionIsolation.UNRECOGNIZED;
  }
}

// Serializes the enum to its proto name; unknown values become "UNKNOWN".
export function postgresqlHostConfig14_TransactionIsolationToJSON(
  object: PostgresqlHostConfig14_TransactionIsolation
): string {
  switch (object) {
    case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED:
      return "TRANSACTION_ISOLATION_UNSPECIFIED";
    case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED:
      return "TRANSACTION_ISOLATION_READ_UNCOMMITTED";
    case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED:
      return "TRANSACTION_ISOLATION_READ_COMMITTED";
    case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ:
      return "TRANSACTION_ISOLATION_REPEATABLE_READ";
    case PostgresqlHostConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE:
      return "TRANSACTION_ISOLATION_SERIALIZABLE";
    default:
      return "UNKNOWN";
  }
}

// Proto enum: output format for `bytea` values.
export enum PostgresqlHostConfig14_ByteaOutput {
  BYTEA_OUTPUT_UNSPECIFIED = 0,
  BYTEA_OUTPUT_HEX = 1,
  BYTEA_OUTPUT_ESCAPED = 2,
  UNRECOGNIZED = -1,
}

// Parses a JSON value (numeric wire value or proto name string) into the
// enum; anything else maps to UNRECOGNIZED.
export function postgresqlHostConfig14_ByteaOutputFromJSON(
  object: any
): PostgresqlHostConfig14_ByteaOutput {
  switch (object) {
    case 0:
    case "BYTEA_OUTPUT_UNSPECIFIED":
      return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED;
    case 1:
    case "BYTEA_OUTPUT_HEX":
      return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX;
    case 2:
    case "BYTEA_OUTPUT_ESCAPED":
      return PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED;
    case -1:
    case "UNRECOGNIZED":
    default:
      return PostgresqlHostConfig14_ByteaOutput.UNRECOGNIZED;
  }
}

// Serializes the enum to its proto name; unknown values become "UNKNOWN".
export function postgresqlHostConfig14_ByteaOutputToJSON(
  object: PostgresqlHostConfig14_ByteaOutput
): string {
  switch (object) {
    case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED:
      return "BYTEA_OUTPUT_UNSPECIFIED";
    case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_HEX:
      return "BYTEA_OUTPUT_HEX";
    case PostgresqlHostConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED:
      return "BYTEA_OUTPUT_ESCAPED";
    default:
      return "UNKNOWN";
  }
}

// Proto enum: encoding used for binary data embedded in XML.
export enum PostgresqlHostConfig14_XmlBinary {
  XML_BINARY_UNSPECIFIED = 0,
  XML_BINARY_BASE64 = 1,
  XML_BINARY_HEX = 2,
  UNRECOGNIZED = -1,
}

// Parses a JSON value (numeric wire value or proto name string) into the
// enum; anything else maps to UNRECOGNIZED.
export function postgresqlHostConfig14_XmlBinaryFromJSON(
  object: any
): PostgresqlHostConfig14_XmlBinary {
  switch (object) {
    case 0:
    case "XML_BINARY_UNSPECIFIED":
      return PostgresqlHostConfig14_XmlBinary.XML_BINARY_UNSPECIFIED;
    case 1:
    case "XML_BINARY_BASE64":
      return PostgresqlHostConfig14_XmlBinary.XML_BINARY_BASE64;
    case 2:
    case "XML_BINARY_HEX":
      return PostgresqlHostConfig14_XmlBinary.XML_BINARY_HEX;
    case -1:
    case "UNRECOGNIZED":
    default:
      return PostgresqlHostConfig14_XmlBinary.UNRECOGNIZED;
  }
}

// Serializes the enum to its proto name; unknown values become "UNKNOWN".
export function postgresqlHostConfig14_XmlBinaryToJSON(
  object: PostgresqlHostConfig14_XmlBinary
): string {
  switch (object) {
    case PostgresqlHostConfig14_XmlBinary.XML_BINARY_UNSPECIFIED:
      return "XML_BINARY_UNSPECIFIED";
    case PostgresqlHostConfig14_XmlBinary.XML_BINARY_BASE64:
      return "XML_BINARY_BASE64";
    case PostgresqlHostConfig14_XmlBinary.XML_BINARY_HEX:
      return "XML_BINARY_HEX";
    default:
      return "UNKNOWN";
  }
}

// Proto enum: whether XML values are treated as documents or content
// fragments.
export enum PostgresqlHostConfig14_XmlOption {
  XML_OPTION_UNSPECIFIED = 0,
  XML_OPTION_DOCUMENT = 1,
  XML_OPTION_CONTENT = 2,
  UNRECOGNIZED = -1,
}

// Parses a JSON value (numeric wire value or proto name string) into the
// enum; anything else maps to UNRECOGNIZED.
export function postgresqlHostConfig14_XmlOptionFromJSON(
  object: any
): PostgresqlHostConfig14_XmlOption {
  switch (object) {
    case 0:
    case "XML_OPTION_UNSPECIFIED":
      return PostgresqlHostConfig14_XmlOption.XML_OPTION_UNSPECIFIED;
    case 1:
    case "XML_OPTION_DOCUMENT":
      return PostgresqlHostConfig14_XmlOption.XML_OPTION_DOCUMENT;
    case 2:
    case "XML_OPTION_CONTENT":
      return PostgresqlHostConfig14_XmlOption.XML_OPTION_CONTENT;
    case -1:
    case "UNRECOGNIZED":
    default:
      return PostgresqlHostConfig14_XmlOption.UNRECOGNIZED;
  }
}

// Serializes the enum to its proto name; unknown values become "UNKNOWN".
export function postgresqlHostConfig14_XmlOptionToJSON(
  object: PostgresqlHostConfig14_XmlOption
): string {
  switch (object) {
    case PostgresqlHostConfig14_XmlOption.XML_OPTION_UNSPECIFIED:
      return "XML_OPTION_UNSPECIFIED";
    case PostgresqlHostConfig14_XmlOption.XML_OPTION_DOCUMENT:
      return "XML_OPTION_DOCUMENT";
    case PostgresqlHostConfig14_XmlOption.XML_OPTION_CONTENT:
      return "XML_OPTION_CONTENT";
    default:
      return "UNKNOWN";
  }
}

// Proto enum: backslash_quote setting. Note the bare BACKSLASH_QUOTE member
// (value 1) alongside the explicit ON/OFF/SAFE_ENCODING variants.
export enum PostgresqlHostConfig14_BackslashQuote {
  BACKSLASH_QUOTE_UNSPECIFIED = 0,
  BACKSLASH_QUOTE = 1,
  BACKSLASH_QUOTE_ON = 2,
  BACKSLASH_QUOTE_OFF = 3,
  BACKSLASH_QUOTE_SAFE_ENCODING = 4,
  UNRECOGNIZED = -1,
}

// Parses a JSON value (numeric wire value or proto name string) into the
// enum; anything else maps to UNRECOGNIZED.
export function postgresqlHostConfig14_BackslashQuoteFromJSON(
  object: any
): PostgresqlHostConfig14_BackslashQuote {
  switch (object) {
    case 0:
    case "BACKSLASH_QUOTE_UNSPECIFIED":
      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED;
    case 1:
    case "BACKSLASH_QUOTE":
      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE;
    case 2:
    case "BACKSLASH_QUOTE_ON":
      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON;
    case 3:
    case "BACKSLASH_QUOTE_OFF":
      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF;
    case 4:
    case "BACKSLASH_QUOTE_SAFE_ENCODING":
      return PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING;
    case -1:
    case "UNRECOGNIZED":
    default:
      return PostgresqlHostConfig14_BackslashQuote.UNRECOGNIZED;
  }
}

// Serializes the enum to its proto name; unknown values become "UNKNOWN".
// (The final case of this switch continues on the following source line.)
export function postgresqlHostConfig14_BackslashQuoteToJSON(
  object: PostgresqlHostConfig14_BackslashQuote
): string {
  switch (object) {
    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED:
      return "BACKSLASH_QUOTE_UNSPECIFIED";
    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE:
      return "BACKSLASH_QUOTE";
    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_ON:
      return "BACKSLASH_QUOTE_ON";
    case PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF:
      return "BACKSLASH_QUOTE_OFF";
    case
PostgresqlHostConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +const basePostgresqlHostConfig14: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14", + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", +}; + +export const PostgresqlHostConfig14 = { + $type: + "yandex.cloud.mdb.postgresql.v1.config.PostgresqlHostConfig14" as const, + + encode( + message: PostgresqlHostConfig14, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.recoveryMinApplyDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.recoveryMinApplyDelay!, + }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(72).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(104).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(112).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(120).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(128).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if 
(message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(162).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(170).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(176).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(192).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(210).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(224).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(234).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(256).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(264).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(272).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(282).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(290).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(298).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(320).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(338).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(370).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(386).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(434).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(482).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! 
}, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! }, + writer.uint32(514).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(538).string(message.timezone); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(554).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlHostConfig14 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePostgresqlHostConfig14 } as PostgresqlHostConfig14; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.recoveryMinApplyDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 6: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.constraintExclusion = reader.int32() as any; + break; + case 10: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.forceParallelMode = reader.int32() as any; + break; + case 14: + message.clientMinMessages = reader.int32() as any; + break; + case 15: + message.logMinMessages = reader.int32() as any; + break; + case 16: + message.logMinErrorStatement = reader.int32() as any; + break; + case 17: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.logConnections = 
BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 21: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 22: + message.logErrorVerbosity = reader.int32() as any; + break; + case 23: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.logStatement = reader.int32() as any; + break; + case 25: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 26: + message.searchPath = reader.string(); + break; + case 27: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 28: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 29: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 30: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 31: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.byteaOutput = reader.int32() as any; + break; + case 33: + message.xmlbinary = reader.int32() as any; + break; + case 34: + message.xmloption = reader.int32() as any; + break; + case 35: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 36: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 37: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 40: + message.backslashQuote = reader.int32() as any; + break; + case 41: + 
message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 42: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 43: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 45: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 47: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 48: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 49: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 50: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 54: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 55: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 60: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 61: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + 
case 63: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 64: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.timezone = reader.string(); + break; + case 68: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlHostConfig14 { + const message = { ...basePostgresqlHostConfig14 } as PostgresqlHostConfig14; + message.recoveryMinApplyDelay = + object.recoveryMinApplyDelay !== undefined && + object.recoveryMinApplyDelay !== null + ? Number(object.recoveryMinApplyDelay) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? 
Number(object.oldSnapshotThreshold) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlHostConfig14_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? postgresqlHostConfig14_ForceParallelModeFromJSON( + object.forceParallelMode + ) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlHostConfig14_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlHostConfig14_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlHostConfig14_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? 
Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlHostConfig14_LogErrorVerbosityFromJSON( + object.logErrorVerbosity + ) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? postgresqlHostConfig14_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlHostConfig14_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? 
Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlHostConfig14_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlHostConfig14_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlHostConfig14_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlHostConfig14_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? 
Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? 
Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? 
Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + return message; + }, + + toJSON(message: PostgresqlHostConfig14): unknown { + const obj: any = {}; + message.recoveryMinApplyDelay !== undefined && + (obj.recoveryMinApplyDelay = message.recoveryMinApplyDelay); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = message.oldSnapshotThreshold); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = + postgresqlHostConfig14_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlHostConfig14_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlHostConfig14_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlHostConfig14_LogLevelToJSON( + message.logMinMessages + )); + 
message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = postgresqlHostConfig14_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlHostConfig14_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlHostConfig14_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlHostConfig14_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlHostConfig14_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlHostConfig14_XmlBinaryToJSON( + 
message.xmlbinary + )); + message.xmloption !== undefined && + (obj.xmloption = postgresqlHostConfig14_XmlOptionToJSON( + message.xmloption + )); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlHostConfig14_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + 
message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlHostConfig14 { + const message = { ...basePostgresqlHostConfig14 } as PostgresqlHostConfig14; + message.recoveryMinApplyDelay = object.recoveryMinApplyDelay ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.workMem = object.workMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? 
undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? 
undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.timezone = object.timezone ?? ""; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlHostConfig14.$type, PostgresqlHostConfig14); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts index 5be6c6d8..5545a401 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10.ts @@ -149,6 +149,11 @@ export interface PostgresqlConfig10 { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: PostgresqlConfig10_PgHintPlanDebugPrint; pgHintPlanMessageLevel: PostgresqlConfig10_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig10_WalLevel { @@ -1732,6 +1737,48 @@ export const PostgresqlConfig10 = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(896).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + 
writer.uint32(914).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(938).fork() + ).ldelim(); + } return writer; }, @@ -2335,6 +2382,36 @@ export const PostgresqlConfig10 = { case 112: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 113: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2860,6 +2937,30 @@ export const PostgresqlConfig10 = { object.pgHintPlanMessageLevel !== null ? postgresqlConfig10_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? 
Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3114,6 +3215,16 @@ export const PostgresqlConfig10 = { (obj.pgHintPlanMessageLevel = postgresqlConfig10_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3252,6 +3363,12 @@ export const PostgresqlConfig10 = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts index d4893bc1..430d62ab 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql10_1c.ts @@ -151,6 +151,11 @@ export interface Postgresqlconfig101c { pgHintPlanMessageLevel: Postgresqlconfig101c_LogLevel; onlineAnalyzeEnable?: boolean; plantunerFixEmptyTable?: boolean; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum Postgresqlconfig101c_WalLevel { @@ -1752,6 +1757,48 @@ export const Postgresqlconfig101c = { writer.uint32(914).fork() ).ldelim(); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } return writer; }, @@ -2370,6 +2417,36 @@ export const Postgresqlconfig101c = { reader.uint32() ).value; break; + case 115: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -2911,6 +2988,30 @@ export const Postgresqlconfig101c = { object.plantunerFixEmptyTable !== null ? Boolean(object.plantunerFixEmptyTable) : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? 
Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3170,6 +3271,16 @@ export const Postgresqlconfig101c = { (obj.onlineAnalyzeEnable = message.onlineAnalyzeEnable); message.plantunerFixEmptyTable !== undefined && (obj.plantunerFixEmptyTable = message.plantunerFixEmptyTable); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3310,6 +3421,12 @@ export const Postgresqlconfig101c = { message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; message.onlineAnalyzeEnable = object.onlineAnalyzeEnable ?? undefined; message.plantunerFixEmptyTable = object.plantunerFixEmptyTable ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts index 6bda4a9a..bda39416 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11.ts @@ -157,6 +157,11 @@ export interface PostgresqlConfig11 { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: PostgresqlConfig11_PgHintPlanDebugPrint; pgHintPlanMessageLevel: PostgresqlConfig11_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig11_WalLevel { @@ -1809,6 +1814,48 @@ export const PostgresqlConfig11 = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(968).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(978).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(986).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } return writer; }, @@ -2457,6 +2504,36 @@ export const PostgresqlConfig11 = { case 121: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 122: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 123: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 124: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3021,6 +3098,30 @@ export const PostgresqlConfig11 = { object.pgHintPlanMessageLevel !== null ? postgresqlConfig11_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3292,6 +3393,16 @@ export const PostgresqlConfig11 = { (obj.pgHintPlanMessageLevel = postgresqlConfig11_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3443,6 +3554,12 @@ export const PostgresqlConfig11 = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts index 0c8473bb..f8e82ee5 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql11_1c.ts @@ -157,6 +157,11 @@ export interface Postgresqlconfig111c { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: Postgresqlconfig111c_PgHintPlanDebugPrint; pgHintPlanMessageLevel: Postgresqlconfig111c_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum Postgresqlconfig111c_WalLevel { @@ -1809,6 +1814,48 @@ export const Postgresqlconfig111c = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(968).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(978).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(986).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } return writer; }, @@ -2460,6 +2507,36 @@ export const Postgresqlconfig111c = { case 121: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 122: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 123: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 124: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3030,6 +3107,30 @@ export const Postgresqlconfig111c = { object.pgHintPlanMessageLevel !== null ? postgresqlconfig111c_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3302,6 +3403,16 @@ export const Postgresqlconfig111c = { (obj.pgHintPlanMessageLevel = postgresqlconfig111c_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3453,6 +3564,12 @@ export const Postgresqlconfig111c = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts index ba276b27..85503a0c 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12.ts @@ -159,6 +159,11 @@ export interface PostgresqlConfig12 { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: PostgresqlConfig12_PgHintPlanDebugPrint; pgHintPlanMessageLevel: PostgresqlConfig12_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig12_WalLevel { @@ -1872,6 +1877,48 @@ export const PostgresqlConfig12 = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(984).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } return writer; }, @@ -2529,6 +2576,36 @@ export const PostgresqlConfig12 = { case 123: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 124: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3102,6 +3179,30 @@ export const PostgresqlConfig12 = { object.pgHintPlanMessageLevel !== null ? postgresqlConfig12_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3379,6 +3480,16 @@ export const PostgresqlConfig12 = { (obj.pgHintPlanMessageLevel = postgresqlConfig12_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3533,6 +3644,12 @@ export const PostgresqlConfig12 = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts index 52d346a5..d79a33ea 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql12_1c.ts @@ -159,6 +159,11 @@ export interface Postgresqlconfig121c { pgHintPlanEnableHintTable?: boolean; pgHintPlanDebugPrint: Postgresqlconfig121c_PgHintPlanDebugPrint; pgHintPlanMessageLevel: Postgresqlconfig121c_LogLevel; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum Postgresqlconfig121c_WalLevel { @@ -1872,6 +1877,48 @@ export const Postgresqlconfig121c = { if (message.pgHintPlanMessageLevel !== 0) { writer.uint32(984).int32(message.pgHintPlanMessageLevel); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1002).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } return writer; }, @@ -2532,6 +2579,36 @@ export const Postgresqlconfig121c = { case 123: message.pgHintPlanMessageLevel = reader.int32() as any; break; + case 124: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 125: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3111,6 +3188,30 @@ export const Postgresqlconfig121c = { object.pgHintPlanMessageLevel !== null ? postgresqlconfig121c_LogLevelFromJSON(object.pgHintPlanMessageLevel) : 0; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? 
Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3389,6 +3490,16 @@ export const Postgresqlconfig121c = { (obj.pgHintPlanMessageLevel = postgresqlconfig121c_LogLevelToJSON( message.pgHintPlanMessageLevel )); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3543,6 +3654,12 @@ export const Postgresqlconfig121c = { object.pgHintPlanEnableHintTable ?? undefined; message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts index d921ed9a..518a877b 100644 --- a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql13.ts @@ -177,6 +177,11 @@ export interface PostgresqlConfig13 { logParameterMaxLength?: number; /** in bytes. */ logParameterMaxLengthOnError?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; } export enum PostgresqlConfig13_WalLevel { @@ -1995,6 +2000,48 @@ export const PostgresqlConfig13 = { writer.uint32(1090).fork() ).ldelim(); } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1106).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } return writer; }, @@ -2724,6 +2771,36 @@ export const PostgresqlConfig13 = { reader.uint32() ).value; break; + case 137: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; default: reader.skipType(tag & 7); break; @@ -3356,6 +3433,30 @@ export const PostgresqlConfig13 = { object.logParameterMaxLengthOnError !== null ? Number(object.logParameterMaxLengthOnError) : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? 
Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? Number(object.pgQualstatsSampleRate) + : undefined; return message; }, @@ -3659,6 +3760,16 @@ export const PostgresqlConfig13 = { (obj.logParameterMaxLength = message.logParameterMaxLength); message.logParameterMaxLengthOnError !== undefined && (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = message.pgQualstatsSampleRate); return obj; }, @@ -3829,6 +3940,12 @@ export const PostgresqlConfig13 = { message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; message.logParameterMaxLengthOnError = object.logParameterMaxLengthOnError ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? 
undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts new file mode 100644 index 00000000..de6d6a7b --- /dev/null +++ b/src/generated/yandex/cloud/mdb/postgresql/v1/config/postgresql14.ts @@ -0,0 +1,4230 @@ +/* eslint-disable */ +import { messageTypeRegistry } from "../../../../../../typeRegistry"; +import Long from "long"; +import _m0 from "protobufjs/minimal"; +import { + Int64Value, + DoubleValue, + BoolValue, +} from "../../../../../../google/protobuf/wrappers"; + +export const protobufPackage = "yandex.cloud.mdb.postgresql.v1.config"; + +/** + * Options and structure of `PostgresqlConfig` reflects PostgreSQL configuration file + * parameters which detailed description is available in + * [PostgreSQL documentation](https://www.postgresql.org/docs/11/runtime-config.html). + */ +export interface PostgresqlConfig14 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14"; + maxConnections?: number; + /** in bytes. */ + sharedBuffers?: number; + /** in bytes. */ + tempBuffers?: number; + maxPreparedTransactions?: number; + /** in bytes. */ + workMem?: number; + /** in bytes. */ + maintenanceWorkMem?: number; + /** in bytes. */ + autovacuumWorkMem?: number; + /** in bytes. */ + tempFileLimit?: number; + /** in milliseconds. */ + vacuumCostDelay?: number; + vacuumCostPageHit?: number; + vacuumCostPageMiss?: number; + vacuumCostPageDirty?: number; + vacuumCostLimit?: number; + /** in milliseconds. */ + bgwriterDelay?: number; + bgwriterLruMaxpages?: number; + bgwriterLruMultiplier?: number; + bgwriterFlushAfter?: number; + backendFlushAfter?: number; + oldSnapshotThreshold?: number; + walLevel: PostgresqlConfig14_WalLevel; + synchronousCommit: PostgresqlConfig14_SynchronousCommit; + /** in milliseconds. */ + checkpointTimeout?: number; + checkpointCompletionTarget?: number; + checkpointFlushAfter?: number; + /** in bytes. 
*/ + maxWalSize?: number; + /** in bytes. */ + minWalSize?: number; + /** in milliseconds. */ + maxStandbyStreamingDelay?: number; + defaultStatisticsTarget?: number; + constraintExclusion: PostgresqlConfig14_ConstraintExclusion; + cursorTupleFraction?: number; + fromCollapseLimit?: number; + joinCollapseLimit?: number; + forceParallelMode: PostgresqlConfig14_ForceParallelMode; + clientMinMessages: PostgresqlConfig14_LogLevel; + logMinMessages: PostgresqlConfig14_LogLevel; + logMinErrorStatement: PostgresqlConfig14_LogLevel; + /** in milliseconds. */ + logMinDurationStatement?: number; + logCheckpoints?: boolean; + logConnections?: boolean; + logDisconnections?: boolean; + logDuration?: boolean; + logErrorVerbosity: PostgresqlConfig14_LogErrorVerbosity; + logLockWaits?: boolean; + logStatement: PostgresqlConfig14_LogStatement; + logTempFiles?: number; + searchPath: string; + rowSecurity?: boolean; + defaultTransactionIsolation: PostgresqlConfig14_TransactionIsolation; + /** in milliseconds. */ + statementTimeout?: number; + /** in milliseconds. */ + lockTimeout?: number; + /** in milliseconds. */ + idleInTransactionSessionTimeout?: number; + byteaOutput: PostgresqlConfig14_ByteaOutput; + xmlbinary: PostgresqlConfig14_XmlBinary; + xmloption: PostgresqlConfig14_XmlOption; + /** in bytes. */ + ginPendingListLimit?: number; + /** in milliseconds. */ + deadlockTimeout?: number; + maxLocksPerTransaction?: number; + maxPredLocksPerTransaction?: number; + arrayNulls?: boolean; + backslashQuote: PostgresqlConfig14_BackslashQuote; + defaultWithOids?: boolean; + escapeStringWarning?: boolean; + loCompatPrivileges?: boolean; + quoteAllIdentifiers?: boolean; + standardConformingStrings?: boolean; + synchronizeSeqscans?: boolean; + transformNullEquals?: boolean; + exitOnError?: boolean; + seqPageCost?: number; + randomPageCost?: number; + autovacuumMaxWorkers?: number; + autovacuumVacuumCostDelay?: number; + autovacuumVacuumCostLimit?: number; + /** in milliseconds. 
*/ + autovacuumNaptime?: number; + /** in milliseconds. */ + archiveTimeout?: number; + trackActivityQuerySize?: number; + enableBitmapscan?: boolean; + enableHashagg?: boolean; + enableHashjoin?: boolean; + enableIndexscan?: boolean; + enableIndexonlyscan?: boolean; + enableMaterial?: boolean; + enableMergejoin?: boolean; + enableNestloop?: boolean; + enableSeqscan?: boolean; + enableSort?: boolean; + enableTidscan?: boolean; + maxWorkerProcesses?: number; + maxParallelWorkers?: number; + maxParallelWorkersPerGather?: number; + autovacuumVacuumScaleFactor?: number; + autovacuumAnalyzeScaleFactor?: number; + defaultTransactionReadOnly?: boolean; + timezone: string; + enableParallelAppend?: boolean; + enableParallelHash?: boolean; + enablePartitionPruning?: boolean; + enablePartitionwiseAggregate?: boolean; + enablePartitionwiseJoin?: boolean; + jit?: boolean; + maxParallelMaintenanceWorkers?: number; + parallelLeaderParticipation?: boolean; + logTransactionSampleRate?: number; + planCacheMode: PostgresqlConfig14_PlanCacheMode; + effectiveIoConcurrency?: number; + effectiveCacheSize?: number; + sharedPreloadLibraries: PostgresqlConfig14_SharedPreloadLibraries[]; + /** in milliseconds. */ + autoExplainLogMinDuration?: number; + autoExplainLogAnalyze?: boolean; + autoExplainLogBuffers?: boolean; + autoExplainLogTiming?: boolean; + autoExplainLogTriggers?: boolean; + autoExplainLogVerbose?: boolean; + autoExplainLogNestedStatements?: boolean; + autoExplainSampleRate?: number; + pgHintPlanEnableHint?: boolean; + pgHintPlanEnableHintTable?: boolean; + pgHintPlanDebugPrint: PostgresqlConfig14_PgHintPlanDebugPrint; + pgHintPlanMessageLevel: PostgresqlConfig14_LogLevel; + hashMemMultiplier?: number; + /** in bytes. */ + logicalDecodingWorkMem?: number; + maintenanceIoConcurrency?: number; + /** in bytes. */ + maxSlotWalKeepSize?: number; + /** in bytes. 
*/ + walKeepSize?: number; + enableIncrementalSort?: boolean; + autovacuumVacuumInsertThreshold?: number; + autovacuumVacuumInsertScaleFactor?: number; + /** in milliseconds. */ + logMinDurationSample?: number; + logStatementSampleRate?: number; + /** in bytes. */ + logParameterMaxLength?: number; + /** in bytes. */ + logParameterMaxLengthOnError?: number; + /** in milliseconds. */ + clientConnectionCheckInterval?: number; + enableAsyncAppend?: boolean; + enableGathermerge?: boolean; + enableMemoize?: boolean; + /** in milliseconds. */ + logRecoveryConflictWaits?: boolean; + /** in milliseconds. */ + vacuumFailsafeAge?: number; + /** in milliseconds. */ + vacuumMultixactFailsafeAge?: number; + pgQualstatsEnabled?: boolean; + pgQualstatsTrackConstants?: boolean; + pgQualstatsMax?: number; + pgQualstatsResolveOids?: boolean; + pgQualstatsSampleRate?: number; +} + +export enum PostgresqlConfig14_WalLevel { + WAL_LEVEL_UNSPECIFIED = 0, + WAL_LEVEL_REPLICA = 1, + WAL_LEVEL_LOGICAL = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_WalLevelFromJSON( + object: any +): PostgresqlConfig14_WalLevel { + switch (object) { + case 0: + case "WAL_LEVEL_UNSPECIFIED": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED; + case 1: + case "WAL_LEVEL_REPLICA": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA; + case 2: + case "WAL_LEVEL_LOGICAL": + return PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_WalLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_WalLevelToJSON( + object: PostgresqlConfig14_WalLevel +): string { + switch (object) { + case PostgresqlConfig14_WalLevel.WAL_LEVEL_UNSPECIFIED: + return "WAL_LEVEL_UNSPECIFIED"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_REPLICA: + return "WAL_LEVEL_REPLICA"; + case PostgresqlConfig14_WalLevel.WAL_LEVEL_LOGICAL: + return "WAL_LEVEL_LOGICAL"; + default: + return "UNKNOWN"; + } +} + +export enum 
PostgresqlConfig14_SynchronousCommit { + SYNCHRONOUS_COMMIT_UNSPECIFIED = 0, + SYNCHRONOUS_COMMIT_ON = 1, + SYNCHRONOUS_COMMIT_OFF = 2, + SYNCHRONOUS_COMMIT_LOCAL = 3, + SYNCHRONOUS_COMMIT_REMOTE_WRITE = 4, + SYNCHRONOUS_COMMIT_REMOTE_APPLY = 5, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_SynchronousCommitFromJSON( + object: any +): PostgresqlConfig14_SynchronousCommit { + switch (object) { + case 0: + case "SYNCHRONOUS_COMMIT_UNSPECIFIED": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED; + case 1: + case "SYNCHRONOUS_COMMIT_ON": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON; + case 2: + case "SYNCHRONOUS_COMMIT_OFF": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF; + case 3: + case "SYNCHRONOUS_COMMIT_LOCAL": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL; + case 4: + case "SYNCHRONOUS_COMMIT_REMOTE_WRITE": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE; + case 5: + case "SYNCHRONOUS_COMMIT_REMOTE_APPLY": + return PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_SynchronousCommit.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_SynchronousCommitToJSON( + object: PostgresqlConfig14_SynchronousCommit +): string { + switch (object) { + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_UNSPECIFIED: + return "SYNCHRONOUS_COMMIT_UNSPECIFIED"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_ON: + return "SYNCHRONOUS_COMMIT_ON"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_OFF: + return "SYNCHRONOUS_COMMIT_OFF"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_LOCAL: + return "SYNCHRONOUS_COMMIT_LOCAL"; + case PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_WRITE: + return "SYNCHRONOUS_COMMIT_REMOTE_WRITE"; + case 
PostgresqlConfig14_SynchronousCommit.SYNCHRONOUS_COMMIT_REMOTE_APPLY: + return "SYNCHRONOUS_COMMIT_REMOTE_APPLY"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_ConstraintExclusion { + CONSTRAINT_EXCLUSION_UNSPECIFIED = 0, + CONSTRAINT_EXCLUSION_ON = 1, + CONSTRAINT_EXCLUSION_OFF = 2, + CONSTRAINT_EXCLUSION_PARTITION = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_ConstraintExclusionFromJSON( + object: any +): PostgresqlConfig14_ConstraintExclusion { + switch (object) { + case 0: + case "CONSTRAINT_EXCLUSION_UNSPECIFIED": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED; + case 1: + case "CONSTRAINT_EXCLUSION_ON": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON; + case 2: + case "CONSTRAINT_EXCLUSION_OFF": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF; + case 3: + case "CONSTRAINT_EXCLUSION_PARTITION": + return PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_ConstraintExclusion.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_ConstraintExclusionToJSON( + object: PostgresqlConfig14_ConstraintExclusion +): string { + switch (object) { + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_UNSPECIFIED: + return "CONSTRAINT_EXCLUSION_UNSPECIFIED"; + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_ON: + return "CONSTRAINT_EXCLUSION_ON"; + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_OFF: + return "CONSTRAINT_EXCLUSION_OFF"; + case PostgresqlConfig14_ConstraintExclusion.CONSTRAINT_EXCLUSION_PARTITION: + return "CONSTRAINT_EXCLUSION_PARTITION"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_ForceParallelMode { + FORCE_PARALLEL_MODE_UNSPECIFIED = 0, + FORCE_PARALLEL_MODE_ON = 1, + FORCE_PARALLEL_MODE_OFF = 2, + FORCE_PARALLEL_MODE_REGRESS = 3, + UNRECOGNIZED = -1, +} + 
+export function postgresqlConfig14_ForceParallelModeFromJSON( + object: any +): PostgresqlConfig14_ForceParallelMode { + switch (object) { + case 0: + case "FORCE_PARALLEL_MODE_UNSPECIFIED": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED; + case 1: + case "FORCE_PARALLEL_MODE_ON": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON; + case 2: + case "FORCE_PARALLEL_MODE_OFF": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF; + case 3: + case "FORCE_PARALLEL_MODE_REGRESS": + return PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_ForceParallelMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_ForceParallelModeToJSON( + object: PostgresqlConfig14_ForceParallelMode +): string { + switch (object) { + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_UNSPECIFIED: + return "FORCE_PARALLEL_MODE_UNSPECIFIED"; + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_ON: + return "FORCE_PARALLEL_MODE_ON"; + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_OFF: + return "FORCE_PARALLEL_MODE_OFF"; + case PostgresqlConfig14_ForceParallelMode.FORCE_PARALLEL_MODE_REGRESS: + return "FORCE_PARALLEL_MODE_REGRESS"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_LogLevel { + LOG_LEVEL_UNSPECIFIED = 0, + LOG_LEVEL_DEBUG5 = 1, + LOG_LEVEL_DEBUG4 = 2, + LOG_LEVEL_DEBUG3 = 3, + LOG_LEVEL_DEBUG2 = 4, + LOG_LEVEL_DEBUG1 = 5, + LOG_LEVEL_LOG = 6, + LOG_LEVEL_NOTICE = 7, + LOG_LEVEL_WARNING = 8, + LOG_LEVEL_ERROR = 9, + LOG_LEVEL_FATAL = 10, + LOG_LEVEL_PANIC = 11, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogLevelFromJSON( + object: any +): PostgresqlConfig14_LogLevel { + switch (object) { + case 0: + case "LOG_LEVEL_UNSPECIFIED": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED; + case 1: + case "LOG_LEVEL_DEBUG5": + return 
PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG5; + case 2: + case "LOG_LEVEL_DEBUG4": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG4; + case 3: + case "LOG_LEVEL_DEBUG3": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG3; + case 4: + case "LOG_LEVEL_DEBUG2": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG2; + case 5: + case "LOG_LEVEL_DEBUG1": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG1; + case 6: + case "LOG_LEVEL_LOG": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_LOG; + case 7: + case "LOG_LEVEL_NOTICE": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_NOTICE; + case 8: + case "LOG_LEVEL_WARNING": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_WARNING; + case 9: + case "LOG_LEVEL_ERROR": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_ERROR; + case 10: + case "LOG_LEVEL_FATAL": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_FATAL; + case 11: + case "LOG_LEVEL_PANIC": + return PostgresqlConfig14_LogLevel.LOG_LEVEL_PANIC; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogLevel.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogLevelToJSON( + object: PostgresqlConfig14_LogLevel +): string { + switch (object) { + case PostgresqlConfig14_LogLevel.LOG_LEVEL_UNSPECIFIED: + return "LOG_LEVEL_UNSPECIFIED"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG5: + return "LOG_LEVEL_DEBUG5"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG4: + return "LOG_LEVEL_DEBUG4"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG3: + return "LOG_LEVEL_DEBUG3"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG2: + return "LOG_LEVEL_DEBUG2"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_DEBUG1: + return "LOG_LEVEL_DEBUG1"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_LOG: + return "LOG_LEVEL_LOG"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_NOTICE: + return "LOG_LEVEL_NOTICE"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_WARNING: + return "LOG_LEVEL_WARNING"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_ERROR: + return 
"LOG_LEVEL_ERROR"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_FATAL: + return "LOG_LEVEL_FATAL"; + case PostgresqlConfig14_LogLevel.LOG_LEVEL_PANIC: + return "LOG_LEVEL_PANIC"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_LogErrorVerbosity { + LOG_ERROR_VERBOSITY_UNSPECIFIED = 0, + LOG_ERROR_VERBOSITY_TERSE = 1, + LOG_ERROR_VERBOSITY_DEFAULT = 2, + LOG_ERROR_VERBOSITY_VERBOSE = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogErrorVerbosityFromJSON( + object: any +): PostgresqlConfig14_LogErrorVerbosity { + switch (object) { + case 0: + case "LOG_ERROR_VERBOSITY_UNSPECIFIED": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED; + case 1: + case "LOG_ERROR_VERBOSITY_TERSE": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE; + case 2: + case "LOG_ERROR_VERBOSITY_DEFAULT": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT; + case 3: + case "LOG_ERROR_VERBOSITY_VERBOSE": + return PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogErrorVerbosity.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogErrorVerbosityToJSON( + object: PostgresqlConfig14_LogErrorVerbosity +): string { + switch (object) { + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_UNSPECIFIED: + return "LOG_ERROR_VERBOSITY_UNSPECIFIED"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_TERSE: + return "LOG_ERROR_VERBOSITY_TERSE"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_DEFAULT: + return "LOG_ERROR_VERBOSITY_DEFAULT"; + case PostgresqlConfig14_LogErrorVerbosity.LOG_ERROR_VERBOSITY_VERBOSE: + return "LOG_ERROR_VERBOSITY_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_LogStatement { + LOG_STATEMENT_UNSPECIFIED = 0, + LOG_STATEMENT_NONE = 1, + LOG_STATEMENT_DDL = 2, + LOG_STATEMENT_MOD = 3, + 
LOG_STATEMENT_ALL = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_LogStatementFromJSON( + object: any +): PostgresqlConfig14_LogStatement { + switch (object) { + case 0: + case "LOG_STATEMENT_UNSPECIFIED": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED; + case 1: + case "LOG_STATEMENT_NONE": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_NONE; + case 2: + case "LOG_STATEMENT_DDL": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_DDL; + case 3: + case "LOG_STATEMENT_MOD": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_MOD; + case 4: + case "LOG_STATEMENT_ALL": + return PostgresqlConfig14_LogStatement.LOG_STATEMENT_ALL; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_LogStatement.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_LogStatementToJSON( + object: PostgresqlConfig14_LogStatement +): string { + switch (object) { + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_UNSPECIFIED: + return "LOG_STATEMENT_UNSPECIFIED"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_NONE: + return "LOG_STATEMENT_NONE"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_DDL: + return "LOG_STATEMENT_DDL"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_MOD: + return "LOG_STATEMENT_MOD"; + case PostgresqlConfig14_LogStatement.LOG_STATEMENT_ALL: + return "LOG_STATEMENT_ALL"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_TransactionIsolation { + TRANSACTION_ISOLATION_UNSPECIFIED = 0, + TRANSACTION_ISOLATION_READ_UNCOMMITTED = 1, + TRANSACTION_ISOLATION_READ_COMMITTED = 2, + TRANSACTION_ISOLATION_REPEATABLE_READ = 3, + TRANSACTION_ISOLATION_SERIALIZABLE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_TransactionIsolationFromJSON( + object: any +): PostgresqlConfig14_TransactionIsolation { + switch (object) { + case 0: + case "TRANSACTION_ISOLATION_UNSPECIFIED": + return 
PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED; + case 1: + case "TRANSACTION_ISOLATION_READ_UNCOMMITTED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED; + case 2: + case "TRANSACTION_ISOLATION_READ_COMMITTED": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED; + case 3: + case "TRANSACTION_ISOLATION_REPEATABLE_READ": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ; + case 4: + case "TRANSACTION_ISOLATION_SERIALIZABLE": + return PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_TransactionIsolation.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_TransactionIsolationToJSON( + object: PostgresqlConfig14_TransactionIsolation +): string { + switch (object) { + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_UNSPECIFIED: + return "TRANSACTION_ISOLATION_UNSPECIFIED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_UNCOMMITTED: + return "TRANSACTION_ISOLATION_READ_UNCOMMITTED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_READ_COMMITTED: + return "TRANSACTION_ISOLATION_READ_COMMITTED"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_REPEATABLE_READ: + return "TRANSACTION_ISOLATION_REPEATABLE_READ"; + case PostgresqlConfig14_TransactionIsolation.TRANSACTION_ISOLATION_SERIALIZABLE: + return "TRANSACTION_ISOLATION_SERIALIZABLE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_ByteaOutput { + BYTEA_OUTPUT_UNSPECIFIED = 0, + BYTEA_OUTPUT_HEX = 1, + BYTEA_OUTPUT_ESCAPED = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_ByteaOutputFromJSON( + object: any +): PostgresqlConfig14_ByteaOutput { + switch (object) { + case 0: + case "BYTEA_OUTPUT_UNSPECIFIED": + return 
PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED; + case 1: + case "BYTEA_OUTPUT_HEX": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX; + case 2: + case "BYTEA_OUTPUT_ESCAPED": + return PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_ByteaOutput.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_ByteaOutputToJSON( + object: PostgresqlConfig14_ByteaOutput +): string { + switch (object) { + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_UNSPECIFIED: + return "BYTEA_OUTPUT_UNSPECIFIED"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_HEX: + return "BYTEA_OUTPUT_HEX"; + case PostgresqlConfig14_ByteaOutput.BYTEA_OUTPUT_ESCAPED: + return "BYTEA_OUTPUT_ESCAPED"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_XmlBinary { + XML_BINARY_UNSPECIFIED = 0, + XML_BINARY_BASE64 = 1, + XML_BINARY_HEX = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_XmlBinaryFromJSON( + object: any +): PostgresqlConfig14_XmlBinary { + switch (object) { + case 0: + case "XML_BINARY_UNSPECIFIED": + return PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED; + case 1: + case "XML_BINARY_BASE64": + return PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64; + case 2: + case "XML_BINARY_HEX": + return PostgresqlConfig14_XmlBinary.XML_BINARY_HEX; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_XmlBinary.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_XmlBinaryToJSON( + object: PostgresqlConfig14_XmlBinary +): string { + switch (object) { + case PostgresqlConfig14_XmlBinary.XML_BINARY_UNSPECIFIED: + return "XML_BINARY_UNSPECIFIED"; + case PostgresqlConfig14_XmlBinary.XML_BINARY_BASE64: + return "XML_BINARY_BASE64"; + case PostgresqlConfig14_XmlBinary.XML_BINARY_HEX: + return "XML_BINARY_HEX"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_XmlOption { + XML_OPTION_UNSPECIFIED = 0, + XML_OPTION_DOCUMENT = 
1, + XML_OPTION_CONTENT = 2, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_XmlOptionFromJSON( + object: any +): PostgresqlConfig14_XmlOption { + switch (object) { + case 0: + case "XML_OPTION_UNSPECIFIED": + return PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED; + case 1: + case "XML_OPTION_DOCUMENT": + return PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT; + case 2: + case "XML_OPTION_CONTENT": + return PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_XmlOption.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_XmlOptionToJSON( + object: PostgresqlConfig14_XmlOption +): string { + switch (object) { + case PostgresqlConfig14_XmlOption.XML_OPTION_UNSPECIFIED: + return "XML_OPTION_UNSPECIFIED"; + case PostgresqlConfig14_XmlOption.XML_OPTION_DOCUMENT: + return "XML_OPTION_DOCUMENT"; + case PostgresqlConfig14_XmlOption.XML_OPTION_CONTENT: + return "XML_OPTION_CONTENT"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_BackslashQuote { + BACKSLASH_QUOTE_UNSPECIFIED = 0, + BACKSLASH_QUOTE = 1, + BACKSLASH_QUOTE_ON = 2, + BACKSLASH_QUOTE_OFF = 3, + BACKSLASH_QUOTE_SAFE_ENCODING = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_BackslashQuoteFromJSON( + object: any +): PostgresqlConfig14_BackslashQuote { + switch (object) { + case 0: + case "BACKSLASH_QUOTE_UNSPECIFIED": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED; + case 1: + case "BACKSLASH_QUOTE": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE; + case 2: + case "BACKSLASH_QUOTE_ON": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON; + case 3: + case "BACKSLASH_QUOTE_OFF": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF; + case 4: + case "BACKSLASH_QUOTE_SAFE_ENCODING": + return PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING; + case -1: + case "UNRECOGNIZED": + default: + return 
PostgresqlConfig14_BackslashQuote.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_BackslashQuoteToJSON( + object: PostgresqlConfig14_BackslashQuote +): string { + switch (object) { + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_UNSPECIFIED: + return "BACKSLASH_QUOTE_UNSPECIFIED"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE: + return "BACKSLASH_QUOTE"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_ON: + return "BACKSLASH_QUOTE_ON"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_OFF: + return "BACKSLASH_QUOTE_OFF"; + case PostgresqlConfig14_BackslashQuote.BACKSLASH_QUOTE_SAFE_ENCODING: + return "BACKSLASH_QUOTE_SAFE_ENCODING"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_PlanCacheMode { + PLAN_CACHE_MODE_UNSPECIFIED = 0, + PLAN_CACHE_MODE_AUTO = 1, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN = 2, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN = 3, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_PlanCacheModeFromJSON( + object: any +): PostgresqlConfig14_PlanCacheMode { + switch (object) { + case 0: + case "PLAN_CACHE_MODE_UNSPECIFIED": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED; + case 1: + case "PLAN_CACHE_MODE_AUTO": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO; + case 2: + case "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN; + case 3: + case "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN": + return PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_PlanCacheMode.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_PlanCacheModeToJSON( + object: PostgresqlConfig14_PlanCacheMode +): string { + switch (object) { + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_UNSPECIFIED: + return "PLAN_CACHE_MODE_UNSPECIFIED"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_AUTO: + return 
"PLAN_CACHE_MODE_AUTO"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN: + return "PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN"; + case PostgresqlConfig14_PlanCacheMode.PLAN_CACHE_MODE_FORCE_GENERIC_PLAN: + return "PLAN_CACHE_MODE_FORCE_GENERIC_PLAN"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_PgHintPlanDebugPrint { + PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED = 0, + PG_HINT_PLAN_DEBUG_PRINT_OFF = 1, + PG_HINT_PLAN_DEBUG_PRINT_ON = 2, + PG_HINT_PLAN_DEBUG_PRINT_DETAILED = 3, + PG_HINT_PLAN_DEBUG_PRINT_VERBOSE = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_PgHintPlanDebugPrintFromJSON( + object: any +): PostgresqlConfig14_PgHintPlanDebugPrint { + switch (object) { + case 0: + case "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED; + case 1: + case "PG_HINT_PLAN_DEBUG_PRINT_OFF": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF; + case 2: + case "PG_HINT_PLAN_DEBUG_PRINT_ON": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON; + case 3: + case "PG_HINT_PLAN_DEBUG_PRINT_DETAILED": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED; + case 4: + case "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE": + return PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_PgHintPlanDebugPrint.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_PgHintPlanDebugPrintToJSON( + object: PostgresqlConfig14_PgHintPlanDebugPrint +): string { + switch (object) { + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED: + return "PG_HINT_PLAN_DEBUG_PRINT_UNSPECIFIED"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_OFF: + return "PG_HINT_PLAN_DEBUG_PRINT_OFF"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_ON: + 
return "PG_HINT_PLAN_DEBUG_PRINT_ON"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_DETAILED: + return "PG_HINT_PLAN_DEBUG_PRINT_DETAILED"; + case PostgresqlConfig14_PgHintPlanDebugPrint.PG_HINT_PLAN_DEBUG_PRINT_VERBOSE: + return "PG_HINT_PLAN_DEBUG_PRINT_VERBOSE"; + default: + return "UNKNOWN"; + } +} + +export enum PostgresqlConfig14_SharedPreloadLibraries { + SHARED_PRELOAD_LIBRARIES_UNSPECIFIED = 0, + SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN = 1, + SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN = 2, + SHARED_PRELOAD_LIBRARIES_TIMESCALEDB = 3, + SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS = 4, + UNRECOGNIZED = -1, +} + +export function postgresqlConfig14_SharedPreloadLibrariesFromJSON( + object: any +): PostgresqlConfig14_SharedPreloadLibraries { + switch (object) { + case 0: + case "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED; + case 1: + case "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN; + case 2: + case "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN; + case 3: + case "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB; + case 4: + case "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS": + return PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS; + case -1: + case "UNRECOGNIZED": + default: + return PostgresqlConfig14_SharedPreloadLibraries.UNRECOGNIZED; + } +} + +export function postgresqlConfig14_SharedPreloadLibrariesToJSON( + object: PostgresqlConfig14_SharedPreloadLibraries +): string { + switch (object) { + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_UNSPECIFIED: + return "SHARED_PRELOAD_LIBRARIES_UNSPECIFIED"; + case 
PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN: + return "SHARED_PRELOAD_LIBRARIES_AUTO_EXPLAIN"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN: + return "SHARED_PRELOAD_LIBRARIES_PG_HINT_PLAN"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_TIMESCALEDB: + return "SHARED_PRELOAD_LIBRARIES_TIMESCALEDB"; + case PostgresqlConfig14_SharedPreloadLibraries.SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS: + return "SHARED_PRELOAD_LIBRARIES_PG_QUALSTATS"; + default: + return "UNKNOWN"; + } +} + +export interface PostgresqlConfigSet14 { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14"; + /** + * Effective settings for a PostgreSQL 14 cluster (a combination of settings defined + * in [user_config] and [default_config]). + */ + effectiveConfig?: PostgresqlConfig14; + /** User-defined settings for a PostgreSQL 14 cluster. */ + userConfig?: PostgresqlConfig14; + /** Default configuration for a PostgreSQL 14 cluster. */ + defaultConfig?: PostgresqlConfig14; +} + +const basePostgresqlConfig14: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14", + walLevel: 0, + synchronousCommit: 0, + constraintExclusion: 0, + forceParallelMode: 0, + clientMinMessages: 0, + logMinMessages: 0, + logMinErrorStatement: 0, + logErrorVerbosity: 0, + logStatement: 0, + searchPath: "", + defaultTransactionIsolation: 0, + byteaOutput: 0, + xmlbinary: 0, + xmloption: 0, + backslashQuote: 0, + timezone: "", + planCacheMode: 0, + sharedPreloadLibraries: 0, + pgHintPlanDebugPrint: 0, + pgHintPlanMessageLevel: 0, +}; + +export const PostgresqlConfig14 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfig14" as const, + + encode( + message: PostgresqlConfig14, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.maxConnections !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.maxConnections! 
}, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.sharedBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.sharedBuffers! }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.tempBuffers !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempBuffers! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.maxPreparedTransactions !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPreparedTransactions!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.workMem !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.workMem! }, + writer.uint32(42).fork() + ).ldelim(); + } + if (message.maintenanceWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceWorkMem!, + }, + writer.uint32(50).fork() + ).ldelim(); + } + if (message.autovacuumWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumWorkMem!, + }, + writer.uint32(58).fork() + ).ldelim(); + } + if (message.tempFileLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.tempFileLimit! 
}, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.vacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostDelay!, + }, + writer.uint32(74).fork() + ).ldelim(); + } + if (message.vacuumCostPageHit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageHit!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.vacuumCostPageMiss !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageMiss!, + }, + writer.uint32(90).fork() + ).ldelim(); + } + if (message.vacuumCostPageDirty !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostPageDirty!, + }, + writer.uint32(98).fork() + ).ldelim(); + } + if (message.vacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumCostLimit!, + }, + writer.uint32(106).fork() + ).ldelim(); + } + if (message.bgwriterDelay !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.bgwriterDelay! 
}, + writer.uint32(114).fork() + ).ldelim(); + } + if (message.bgwriterLruMaxpages !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterLruMaxpages!, + }, + writer.uint32(122).fork() + ).ldelim(); + } + if (message.bgwriterLruMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.bgwriterLruMultiplier!, + }, + writer.uint32(130).fork() + ).ldelim(); + } + if (message.bgwriterFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.bgwriterFlushAfter!, + }, + writer.uint32(138).fork() + ).ldelim(); + } + if (message.backendFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.backendFlushAfter!, + }, + writer.uint32(146).fork() + ).ldelim(); + } + if (message.oldSnapshotThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.oldSnapshotThreshold!, + }, + writer.uint32(154).fork() + ).ldelim(); + } + if (message.walLevel !== 0) { + writer.uint32(160).int32(message.walLevel); + } + if (message.synchronousCommit !== 0) { + writer.uint32(168).int32(message.synchronousCommit); + } + if (message.checkpointTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointTimeout!, + }, + writer.uint32(178).fork() + ).ldelim(); + } + if (message.checkpointCompletionTarget !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.checkpointCompletionTarget!, + }, + writer.uint32(186).fork() + ).ldelim(); + } + if (message.checkpointFlushAfter !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.checkpointFlushAfter!, + }, + writer.uint32(194).fork() + ).ldelim(); + } + if (message.maxWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: 
message.maxWalSize! }, + writer.uint32(202).fork() + ).ldelim(); + } + if (message.minWalSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.minWalSize! }, + writer.uint32(210).fork() + ).ldelim(); + } + if (message.maxStandbyStreamingDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxStandbyStreamingDelay!, + }, + writer.uint32(218).fork() + ).ldelim(); + } + if (message.defaultStatisticsTarget !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.defaultStatisticsTarget!, + }, + writer.uint32(226).fork() + ).ldelim(); + } + if (message.constraintExclusion !== 0) { + writer.uint32(232).int32(message.constraintExclusion); + } + if (message.cursorTupleFraction !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.cursorTupleFraction!, + }, + writer.uint32(242).fork() + ).ldelim(); + } + if (message.fromCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.fromCollapseLimit!, + }, + writer.uint32(250).fork() + ).ldelim(); + } + if (message.joinCollapseLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.joinCollapseLimit!, + }, + writer.uint32(258).fork() + ).ldelim(); + } + if (message.forceParallelMode !== 0) { + writer.uint32(264).int32(message.forceParallelMode); + } + if (message.clientMinMessages !== 0) { + writer.uint32(272).int32(message.clientMinMessages); + } + if (message.logMinMessages !== 0) { + writer.uint32(280).int32(message.logMinMessages); + } + if (message.logMinErrorStatement !== 0) { + writer.uint32(288).int32(message.logMinErrorStatement); + } + if (message.logMinDurationStatement !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationStatement!, + }, + writer.uint32(298).fork() + ).ldelim(); 
+ } + if (message.logCheckpoints !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logCheckpoints! }, + writer.uint32(306).fork() + ).ldelim(); + } + if (message.logConnections !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logConnections! }, + writer.uint32(314).fork() + ).ldelim(); + } + if (message.logDisconnections !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logDisconnections!, + }, + writer.uint32(322).fork() + ).ldelim(); + } + if (message.logDuration !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logDuration! }, + writer.uint32(330).fork() + ).ldelim(); + } + if (message.logErrorVerbosity !== 0) { + writer.uint32(336).int32(message.logErrorVerbosity); + } + if (message.logLockWaits !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.logLockWaits! }, + writer.uint32(346).fork() + ).ldelim(); + } + if (message.logStatement !== 0) { + writer.uint32(352).int32(message.logStatement); + } + if (message.logTempFiles !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.logTempFiles! }, + writer.uint32(362).fork() + ).ldelim(); + } + if (message.searchPath !== "") { + writer.uint32(370).string(message.searchPath); + } + if (message.rowSecurity !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.rowSecurity! 
}, + writer.uint32(378).fork() + ).ldelim(); + } + if (message.defaultTransactionIsolation !== 0) { + writer.uint32(384).int32(message.defaultTransactionIsolation); + } + if (message.statementTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.statementTimeout!, + }, + writer.uint32(394).fork() + ).ldelim(); + } + if (message.lockTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.lockTimeout! }, + writer.uint32(402).fork() + ).ldelim(); + } + if (message.idleInTransactionSessionTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.idleInTransactionSessionTimeout!, + }, + writer.uint32(410).fork() + ).ldelim(); + } + if (message.byteaOutput !== 0) { + writer.uint32(416).int32(message.byteaOutput); + } + if (message.xmlbinary !== 0) { + writer.uint32(424).int32(message.xmlbinary); + } + if (message.xmloption !== 0) { + writer.uint32(432).int32(message.xmloption); + } + if (message.ginPendingListLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.ginPendingListLimit!, + }, + writer.uint32(442).fork() + ).ldelim(); + } + if (message.deadlockTimeout !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.deadlockTimeout!, + }, + writer.uint32(450).fork() + ).ldelim(); + } + if (message.maxLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxLocksPerTransaction!, + }, + writer.uint32(458).fork() + ).ldelim(); + } + if (message.maxPredLocksPerTransaction !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxPredLocksPerTransaction!, + }, + writer.uint32(466).fork() + ).ldelim(); + } + if (message.arrayNulls !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.arrayNulls! 
}, + writer.uint32(474).fork() + ).ldelim(); + } + if (message.backslashQuote !== 0) { + writer.uint32(480).int32(message.backslashQuote); + } + if (message.defaultWithOids !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.defaultWithOids! }, + writer.uint32(490).fork() + ).ldelim(); + } + if (message.escapeStringWarning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.escapeStringWarning!, + }, + writer.uint32(498).fork() + ).ldelim(); + } + if (message.loCompatPrivileges !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.loCompatPrivileges!, + }, + writer.uint32(506).fork() + ).ldelim(); + } + if (message.quoteAllIdentifiers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.quoteAllIdentifiers!, + }, + writer.uint32(522).fork() + ).ldelim(); + } + if (message.standardConformingStrings !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.standardConformingStrings!, + }, + writer.uint32(530).fork() + ).ldelim(); + } + if (message.synchronizeSeqscans !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.synchronizeSeqscans!, + }, + writer.uint32(538).fork() + ).ldelim(); + } + if (message.transformNullEquals !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.transformNullEquals!, + }, + writer.uint32(546).fork() + ).ldelim(); + } + if (message.exitOnError !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.exitOnError! }, + writer.uint32(554).fork() + ).ldelim(); + } + if (message.seqPageCost !== undefined) { + DoubleValue.encode( + { $type: "google.protobuf.DoubleValue", value: message.seqPageCost! 
}, + writer.uint32(562).fork() + ).ldelim(); + } + if (message.randomPageCost !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.randomPageCost!, + }, + writer.uint32(570).fork() + ).ldelim(); + } + if (message.autovacuumMaxWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumMaxWorkers!, + }, + writer.uint32(578).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostDelay !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostDelay!, + }, + writer.uint32(586).fork() + ).ldelim(); + } + if (message.autovacuumVacuumCostLimit !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumCostLimit!, + }, + writer.uint32(594).fork() + ).ldelim(); + } + if (message.autovacuumNaptime !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumNaptime!, + }, + writer.uint32(602).fork() + ).ldelim(); + } + if (message.archiveTimeout !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.archiveTimeout! }, + writer.uint32(610).fork() + ).ldelim(); + } + if (message.trackActivityQuerySize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.trackActivityQuerySize!, + }, + writer.uint32(618).fork() + ).ldelim(); + } + if (message.enableBitmapscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableBitmapscan!, + }, + writer.uint32(642).fork() + ).ldelim(); + } + if (message.enableHashagg !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashagg! 
}, + writer.uint32(650).fork() + ).ldelim(); + } + if (message.enableHashjoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableHashjoin! }, + writer.uint32(658).fork() + ).ldelim(); + } + if (message.enableIndexscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableIndexscan! }, + writer.uint32(666).fork() + ).ldelim(); + } + if (message.enableIndexonlyscan !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIndexonlyscan!, + }, + writer.uint32(674).fork() + ).ldelim(); + } + if (message.enableMaterial !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMaterial! }, + writer.uint32(682).fork() + ).ldelim(); + } + if (message.enableMergejoin !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMergejoin! }, + writer.uint32(690).fork() + ).ldelim(); + } + if (message.enableNestloop !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableNestloop! }, + writer.uint32(698).fork() + ).ldelim(); + } + if (message.enableSeqscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSeqscan! }, + writer.uint32(706).fork() + ).ldelim(); + } + if (message.enableSort !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableSort! }, + writer.uint32(714).fork() + ).ldelim(); + } + if (message.enableTidscan !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableTidscan! 
}, + writer.uint32(722).fork() + ).ldelim(); + } + if (message.maxWorkerProcesses !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxWorkerProcesses!, + }, + writer.uint32(730).fork() + ).ldelim(); + } + if (message.maxParallelWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkers!, + }, + writer.uint32(738).fork() + ).ldelim(); + } + if (message.maxParallelWorkersPerGather !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelWorkersPerGather!, + }, + writer.uint32(746).fork() + ).ldelim(); + } + if (message.autovacuumVacuumScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumScaleFactor!, + }, + writer.uint32(754).fork() + ).ldelim(); + } + if (message.autovacuumAnalyzeScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumAnalyzeScaleFactor!, + }, + writer.uint32(762).fork() + ).ldelim(); + } + if (message.defaultTransactionReadOnly !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.defaultTransactionReadOnly!, + }, + writer.uint32(770).fork() + ).ldelim(); + } + if (message.timezone !== "") { + writer.uint32(778).string(message.timezone); + } + if (message.enableParallelAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelAppend!, + }, + writer.uint32(786).fork() + ).ldelim(); + } + if (message.enableParallelHash !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableParallelHash!, + }, + writer.uint32(794).fork() + ).ldelim(); + } + if (message.enablePartitionPruning !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionPruning!, + }, + 
writer.uint32(802).fork() + ).ldelim(); + } + if (message.enablePartitionwiseAggregate !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseAggregate!, + }, + writer.uint32(810).fork() + ).ldelim(); + } + if (message.enablePartitionwiseJoin !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enablePartitionwiseJoin!, + }, + writer.uint32(818).fork() + ).ldelim(); + } + if (message.jit !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.jit! }, + writer.uint32(826).fork() + ).ldelim(); + } + if (message.maxParallelMaintenanceWorkers !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxParallelMaintenanceWorkers!, + }, + writer.uint32(834).fork() + ).ldelim(); + } + if (message.parallelLeaderParticipation !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.parallelLeaderParticipation!, + }, + writer.uint32(842).fork() + ).ldelim(); + } + if (message.logTransactionSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logTransactionSampleRate!, + }, + writer.uint32(858).fork() + ).ldelim(); + } + if (message.planCacheMode !== 0) { + writer.uint32(864).int32(message.planCacheMode); + } + if (message.effectiveIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveIoConcurrency!, + }, + writer.uint32(874).fork() + ).ldelim(); + } + if (message.effectiveCacheSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.effectiveCacheSize!, + }, + writer.uint32(882).fork() + ).ldelim(); + } + writer.uint32(890).fork(); + for (const v of message.sharedPreloadLibraries) { + writer.int32(v); + } + writer.ldelim(); + if (message.autoExplainLogMinDuration !== undefined) { + 
Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autoExplainLogMinDuration!, + }, + writer.uint32(898).fork() + ).ldelim(); + } + if (message.autoExplainLogAnalyze !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogAnalyze!, + }, + writer.uint32(906).fork() + ).ldelim(); + } + if (message.autoExplainLogBuffers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogBuffers!, + }, + writer.uint32(914).fork() + ).ldelim(); + } + if (message.autoExplainLogTiming !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTiming!, + }, + writer.uint32(922).fork() + ).ldelim(); + } + if (message.autoExplainLogTriggers !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogTriggers!, + }, + writer.uint32(930).fork() + ).ldelim(); + } + if (message.autoExplainLogVerbose !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogVerbose!, + }, + writer.uint32(938).fork() + ).ldelim(); + } + if (message.autoExplainLogNestedStatements !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.autoExplainLogNestedStatements!, + }, + writer.uint32(946).fork() + ).ldelim(); + } + if (message.autoExplainSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autoExplainSampleRate!, + }, + writer.uint32(954).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHint !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHint!, + }, + writer.uint32(962).fork() + ).ldelim(); + } + if (message.pgHintPlanEnableHintTable !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgHintPlanEnableHintTable!, + 
}, + writer.uint32(970).fork() + ).ldelim(); + } + if (message.pgHintPlanDebugPrint !== 0) { + writer.uint32(976).int32(message.pgHintPlanDebugPrint); + } + if (message.pgHintPlanMessageLevel !== 0) { + writer.uint32(984).int32(message.pgHintPlanMessageLevel); + } + if (message.hashMemMultiplier !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.hashMemMultiplier!, + }, + writer.uint32(994).fork() + ).ldelim(); + } + if (message.logicalDecodingWorkMem !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logicalDecodingWorkMem!, + }, + writer.uint32(1010).fork() + ).ldelim(); + } + if (message.maintenanceIoConcurrency !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maintenanceIoConcurrency!, + }, + writer.uint32(1018).fork() + ).ldelim(); + } + if (message.maxSlotWalKeepSize !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.maxSlotWalKeepSize!, + }, + writer.uint32(1026).fork() + ).ldelim(); + } + if (message.walKeepSize !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.walKeepSize! 
}, + writer.uint32(1034).fork() + ).ldelim(); + } + if (message.enableIncrementalSort !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableIncrementalSort!, + }, + writer.uint32(1042).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertThreshold !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.autovacuumVacuumInsertThreshold!, + }, + writer.uint32(1050).fork() + ).ldelim(); + } + if (message.autovacuumVacuumInsertScaleFactor !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.autovacuumVacuumInsertScaleFactor!, + }, + writer.uint32(1058).fork() + ).ldelim(); + } + if (message.logMinDurationSample !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logMinDurationSample!, + }, + writer.uint32(1066).fork() + ).ldelim(); + } + if (message.logStatementSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.logStatementSampleRate!, + }, + writer.uint32(1074).fork() + ).ldelim(); + } + if (message.logParameterMaxLength !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLength!, + }, + writer.uint32(1082).fork() + ).ldelim(); + } + if (message.logParameterMaxLengthOnError !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.logParameterMaxLengthOnError!, + }, + writer.uint32(1090).fork() + ).ldelim(); + } + if (message.clientConnectionCheckInterval !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.clientConnectionCheckInterval!, + }, + writer.uint32(1098).fork() + ).ldelim(); + } + if (message.enableAsyncAppend !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableAsyncAppend!, + }, + writer.uint32(1106).fork() + 
).ldelim(); + } + if (message.enableGathermerge !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.enableGathermerge!, + }, + writer.uint32(1114).fork() + ).ldelim(); + } + if (message.enableMemoize !== undefined) { + BoolValue.encode( + { $type: "google.protobuf.BoolValue", value: message.enableMemoize! }, + writer.uint32(1122).fork() + ).ldelim(); + } + if (message.logRecoveryConflictWaits !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.logRecoveryConflictWaits!, + }, + writer.uint32(1130).fork() + ).ldelim(); + } + if (message.vacuumFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumFailsafeAge!, + }, + writer.uint32(1138).fork() + ).ldelim(); + } + if (message.vacuumMultixactFailsafeAge !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.vacuumMultixactFailsafeAge!, + }, + writer.uint32(1146).fork() + ).ldelim(); + } + if (message.pgQualstatsEnabled !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsEnabled!, + }, + writer.uint32(1154).fork() + ).ldelim(); + } + if (message.pgQualstatsTrackConstants !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsTrackConstants!, + }, + writer.uint32(1162).fork() + ).ldelim(); + } + if (message.pgQualstatsMax !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.pgQualstatsMax! 
}, + writer.uint32(1170).fork() + ).ldelim(); + } + if (message.pgQualstatsResolveOids !== undefined) { + BoolValue.encode( + { + $type: "google.protobuf.BoolValue", + value: message.pgQualstatsResolveOids!, + }, + writer.uint32(1178).fork() + ).ldelim(); + } + if (message.pgQualstatsSampleRate !== undefined) { + DoubleValue.encode( + { + $type: "google.protobuf.DoubleValue", + value: message.pgQualstatsSampleRate!, + }, + writer.uint32(1186).fork() + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PostgresqlConfig14 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...basePostgresqlConfig14 } as PostgresqlConfig14; + message.sharedPreloadLibraries = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxConnections = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 2: + message.sharedBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.tempBuffers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 4: + message.maxPreparedTransactions = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.workMem = Int64Value.decode(reader, reader.uint32()).value; + break; + case 6: + message.maintenanceWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 7: + message.autovacuumWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 8: + message.tempFileLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 9: + message.vacuumCostDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 10: + message.vacuumCostPageHit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.vacuumCostPageMiss = 
Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 12: + message.vacuumCostPageDirty = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 13: + message.vacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 14: + message.bgwriterDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 15: + message.bgwriterLruMaxpages = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 16: + message.bgwriterLruMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 17: + message.bgwriterFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 18: + message.backendFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 19: + message.oldSnapshotThreshold = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 20: + message.walLevel = reader.int32() as any; + break; + case 21: + message.synchronousCommit = reader.int32() as any; + break; + case 22: + message.checkpointTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 23: + message.checkpointCompletionTarget = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 24: + message.checkpointFlushAfter = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 25: + message.maxWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 26: + message.minWalSize = Int64Value.decode(reader, reader.uint32()).value; + break; + case 27: + message.maxStandbyStreamingDelay = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 28: + message.defaultStatisticsTarget = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 29: + message.constraintExclusion = reader.int32() as any; + break; + case 30: + message.cursorTupleFraction = DoubleValue.decode( + reader, + reader.uint32() + 
).value; + break; + case 31: + message.fromCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 32: + message.joinCollapseLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 33: + message.forceParallelMode = reader.int32() as any; + break; + case 34: + message.clientMinMessages = reader.int32() as any; + break; + case 35: + message.logMinMessages = reader.int32() as any; + break; + case 36: + message.logMinErrorStatement = reader.int32() as any; + break; + case 37: + message.logMinDurationStatement = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 38: + message.logCheckpoints = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 39: + message.logConnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 40: + message.logDisconnections = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 41: + message.logDuration = BoolValue.decode(reader, reader.uint32()).value; + break; + case 42: + message.logErrorVerbosity = reader.int32() as any; + break; + case 43: + message.logLockWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 44: + message.logStatement = reader.int32() as any; + break; + case 45: + message.logTempFiles = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 46: + message.searchPath = reader.string(); + break; + case 47: + message.rowSecurity = BoolValue.decode(reader, reader.uint32()).value; + break; + case 48: + message.defaultTransactionIsolation = reader.int32() as any; + break; + case 49: + message.statementTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 50: + message.lockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 51: + message.idleInTransactionSessionTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 52: + message.byteaOutput = 
reader.int32() as any; + break; + case 53: + message.xmlbinary = reader.int32() as any; + break; + case 54: + message.xmloption = reader.int32() as any; + break; + case 55: + message.ginPendingListLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 56: + message.deadlockTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 57: + message.maxLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 58: + message.maxPredLocksPerTransaction = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 59: + message.arrayNulls = BoolValue.decode(reader, reader.uint32()).value; + break; + case 60: + message.backslashQuote = reader.int32() as any; + break; + case 61: + message.defaultWithOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 62: + message.escapeStringWarning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 63: + message.loCompatPrivileges = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 65: + message.quoteAllIdentifiers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 66: + message.standardConformingStrings = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 67: + message.synchronizeSeqscans = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 68: + message.transformNullEquals = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 69: + message.exitOnError = BoolValue.decode(reader, reader.uint32()).value; + break; + case 70: + message.seqPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 71: + message.randomPageCost = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 72: + message.autovacuumMaxWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 73: + message.autovacuumVacuumCostDelay = 
Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 74: + message.autovacuumVacuumCostLimit = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 75: + message.autovacuumNaptime = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 76: + message.archiveTimeout = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 77: + message.trackActivityQuerySize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 80: + message.enableBitmapscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 81: + message.enableHashagg = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 82: + message.enableHashjoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 83: + message.enableIndexscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 84: + message.enableIndexonlyscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 85: + message.enableMaterial = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 86: + message.enableMergejoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 87: + message.enableNestloop = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 88: + message.enableSeqscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 89: + message.enableSort = BoolValue.decode(reader, reader.uint32()).value; + break; + case 90: + message.enableTidscan = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 91: + message.maxWorkerProcesses = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 92: + message.maxParallelWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 93: + message.maxParallelWorkersPerGather = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 94: + 
message.autovacuumVacuumScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 95: + message.autovacuumAnalyzeScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 96: + message.defaultTransactionReadOnly = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 97: + message.timezone = reader.string(); + break; + case 98: + message.enableParallelAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 99: + message.enableParallelHash = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 100: + message.enablePartitionPruning = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 101: + message.enablePartitionwiseAggregate = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 102: + message.enablePartitionwiseJoin = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 103: + message.jit = BoolValue.decode(reader, reader.uint32()).value; + break; + case 104: + message.maxParallelMaintenanceWorkers = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 105: + message.parallelLeaderParticipation = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 107: + message.logTransactionSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 108: + message.planCacheMode = reader.int32() as any; + break; + case 109: + message.effectiveIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 110: + message.effectiveCacheSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 111: + if ((tag & 7) === 2) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + } else { + message.sharedPreloadLibraries.push(reader.int32() as any); + } + break; + case 112: + 
message.autoExplainLogMinDuration = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 113: + message.autoExplainLogAnalyze = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 114: + message.autoExplainLogBuffers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 115: + message.autoExplainLogTiming = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 116: + message.autoExplainLogTriggers = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 117: + message.autoExplainLogVerbose = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 118: + message.autoExplainLogNestedStatements = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 119: + message.autoExplainSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 120: + message.pgHintPlanEnableHint = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 121: + message.pgHintPlanEnableHintTable = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 122: + message.pgHintPlanDebugPrint = reader.int32() as any; + break; + case 123: + message.pgHintPlanMessageLevel = reader.int32() as any; + break; + case 124: + message.hashMemMultiplier = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 126: + message.logicalDecodingWorkMem = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 127: + message.maintenanceIoConcurrency = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 128: + message.maxSlotWalKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 129: + message.walKeepSize = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 130: + message.enableIncrementalSort = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 131: + message.autovacuumVacuumInsertThreshold 
= Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 132: + message.autovacuumVacuumInsertScaleFactor = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 133: + message.logMinDurationSample = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 134: + message.logStatementSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + case 135: + message.logParameterMaxLength = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 136: + message.logParameterMaxLengthOnError = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 137: + message.clientConnectionCheckInterval = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 138: + message.enableAsyncAppend = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 139: + message.enableGathermerge = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 140: + message.enableMemoize = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 141: + message.logRecoveryConflictWaits = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 142: + message.vacuumFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 143: + message.vacuumMultixactFailsafeAge = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 144: + message.pgQualstatsEnabled = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 145: + message.pgQualstatsTrackConstants = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 146: + message.pgQualstatsMax = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 147: + message.pgQualstatsResolveOids = BoolValue.decode( + reader, + reader.uint32() + ).value; + break; + case 148: + message.pgQualstatsSampleRate = DoubleValue.decode( + reader, + reader.uint32() + ).value; + break; + 
default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfig14 { + const message = { ...basePostgresqlConfig14 } as PostgresqlConfig14; + message.maxConnections = + object.maxConnections !== undefined && object.maxConnections !== null + ? Number(object.maxConnections) + : undefined; + message.sharedBuffers = + object.sharedBuffers !== undefined && object.sharedBuffers !== null + ? Number(object.sharedBuffers) + : undefined; + message.tempBuffers = + object.tempBuffers !== undefined && object.tempBuffers !== null + ? Number(object.tempBuffers) + : undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions !== undefined && + object.maxPreparedTransactions !== null + ? Number(object.maxPreparedTransactions) + : undefined; + message.workMem = + object.workMem !== undefined && object.workMem !== null + ? Number(object.workMem) + : undefined; + message.maintenanceWorkMem = + object.maintenanceWorkMem !== undefined && + object.maintenanceWorkMem !== null + ? Number(object.maintenanceWorkMem) + : undefined; + message.autovacuumWorkMem = + object.autovacuumWorkMem !== undefined && + object.autovacuumWorkMem !== null + ? Number(object.autovacuumWorkMem) + : undefined; + message.tempFileLimit = + object.tempFileLimit !== undefined && object.tempFileLimit !== null + ? Number(object.tempFileLimit) + : undefined; + message.vacuumCostDelay = + object.vacuumCostDelay !== undefined && object.vacuumCostDelay !== null + ? Number(object.vacuumCostDelay) + : undefined; + message.vacuumCostPageHit = + object.vacuumCostPageHit !== undefined && + object.vacuumCostPageHit !== null + ? Number(object.vacuumCostPageHit) + : undefined; + message.vacuumCostPageMiss = + object.vacuumCostPageMiss !== undefined && + object.vacuumCostPageMiss !== null + ? 
Number(object.vacuumCostPageMiss) + : undefined; + message.vacuumCostPageDirty = + object.vacuumCostPageDirty !== undefined && + object.vacuumCostPageDirty !== null + ? Number(object.vacuumCostPageDirty) + : undefined; + message.vacuumCostLimit = + object.vacuumCostLimit !== undefined && object.vacuumCostLimit !== null + ? Number(object.vacuumCostLimit) + : undefined; + message.bgwriterDelay = + object.bgwriterDelay !== undefined && object.bgwriterDelay !== null + ? Number(object.bgwriterDelay) + : undefined; + message.bgwriterLruMaxpages = + object.bgwriterLruMaxpages !== undefined && + object.bgwriterLruMaxpages !== null + ? Number(object.bgwriterLruMaxpages) + : undefined; + message.bgwriterLruMultiplier = + object.bgwriterLruMultiplier !== undefined && + object.bgwriterLruMultiplier !== null + ? Number(object.bgwriterLruMultiplier) + : undefined; + message.bgwriterFlushAfter = + object.bgwriterFlushAfter !== undefined && + object.bgwriterFlushAfter !== null + ? Number(object.bgwriterFlushAfter) + : undefined; + message.backendFlushAfter = + object.backendFlushAfter !== undefined && + object.backendFlushAfter !== null + ? Number(object.backendFlushAfter) + : undefined; + message.oldSnapshotThreshold = + object.oldSnapshotThreshold !== undefined && + object.oldSnapshotThreshold !== null + ? Number(object.oldSnapshotThreshold) + : undefined; + message.walLevel = + object.walLevel !== undefined && object.walLevel !== null + ? postgresqlConfig14_WalLevelFromJSON(object.walLevel) + : 0; + message.synchronousCommit = + object.synchronousCommit !== undefined && + object.synchronousCommit !== null + ? postgresqlConfig14_SynchronousCommitFromJSON(object.synchronousCommit) + : 0; + message.checkpointTimeout = + object.checkpointTimeout !== undefined && + object.checkpointTimeout !== null + ? 
Number(object.checkpointTimeout) + : undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget !== undefined && + object.checkpointCompletionTarget !== null + ? Number(object.checkpointCompletionTarget) + : undefined; + message.checkpointFlushAfter = + object.checkpointFlushAfter !== undefined && + object.checkpointFlushAfter !== null + ? Number(object.checkpointFlushAfter) + : undefined; + message.maxWalSize = + object.maxWalSize !== undefined && object.maxWalSize !== null + ? Number(object.maxWalSize) + : undefined; + message.minWalSize = + object.minWalSize !== undefined && object.minWalSize !== null + ? Number(object.minWalSize) + : undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay !== undefined && + object.maxStandbyStreamingDelay !== null + ? Number(object.maxStandbyStreamingDelay) + : undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget !== undefined && + object.defaultStatisticsTarget !== null + ? Number(object.defaultStatisticsTarget) + : undefined; + message.constraintExclusion = + object.constraintExclusion !== undefined && + object.constraintExclusion !== null + ? postgresqlConfig14_ConstraintExclusionFromJSON( + object.constraintExclusion + ) + : 0; + message.cursorTupleFraction = + object.cursorTupleFraction !== undefined && + object.cursorTupleFraction !== null + ? Number(object.cursorTupleFraction) + : undefined; + message.fromCollapseLimit = + object.fromCollapseLimit !== undefined && + object.fromCollapseLimit !== null + ? Number(object.fromCollapseLimit) + : undefined; + message.joinCollapseLimit = + object.joinCollapseLimit !== undefined && + object.joinCollapseLimit !== null + ? Number(object.joinCollapseLimit) + : undefined; + message.forceParallelMode = + object.forceParallelMode !== undefined && + object.forceParallelMode !== null + ? 
postgresqlConfig14_ForceParallelModeFromJSON(object.forceParallelMode) + : 0; + message.clientMinMessages = + object.clientMinMessages !== undefined && + object.clientMinMessages !== null + ? postgresqlConfig14_LogLevelFromJSON(object.clientMinMessages) + : 0; + message.logMinMessages = + object.logMinMessages !== undefined && object.logMinMessages !== null + ? postgresqlConfig14_LogLevelFromJSON(object.logMinMessages) + : 0; + message.logMinErrorStatement = + object.logMinErrorStatement !== undefined && + object.logMinErrorStatement !== null + ? postgresqlConfig14_LogLevelFromJSON(object.logMinErrorStatement) + : 0; + message.logMinDurationStatement = + object.logMinDurationStatement !== undefined && + object.logMinDurationStatement !== null + ? Number(object.logMinDurationStatement) + : undefined; + message.logCheckpoints = + object.logCheckpoints !== undefined && object.logCheckpoints !== null + ? Boolean(object.logCheckpoints) + : undefined; + message.logConnections = + object.logConnections !== undefined && object.logConnections !== null + ? Boolean(object.logConnections) + : undefined; + message.logDisconnections = + object.logDisconnections !== undefined && + object.logDisconnections !== null + ? Boolean(object.logDisconnections) + : undefined; + message.logDuration = + object.logDuration !== undefined && object.logDuration !== null + ? Boolean(object.logDuration) + : undefined; + message.logErrorVerbosity = + object.logErrorVerbosity !== undefined && + object.logErrorVerbosity !== null + ? postgresqlConfig14_LogErrorVerbosityFromJSON(object.logErrorVerbosity) + : 0; + message.logLockWaits = + object.logLockWaits !== undefined && object.logLockWaits !== null + ? Boolean(object.logLockWaits) + : undefined; + message.logStatement = + object.logStatement !== undefined && object.logStatement !== null + ? 
postgresqlConfig14_LogStatementFromJSON(object.logStatement) + : 0; + message.logTempFiles = + object.logTempFiles !== undefined && object.logTempFiles !== null + ? Number(object.logTempFiles) + : undefined; + message.searchPath = + object.searchPath !== undefined && object.searchPath !== null + ? String(object.searchPath) + : ""; + message.rowSecurity = + object.rowSecurity !== undefined && object.rowSecurity !== null + ? Boolean(object.rowSecurity) + : undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation !== undefined && + object.defaultTransactionIsolation !== null + ? postgresqlConfig14_TransactionIsolationFromJSON( + object.defaultTransactionIsolation + ) + : 0; + message.statementTimeout = + object.statementTimeout !== undefined && object.statementTimeout !== null + ? Number(object.statementTimeout) + : undefined; + message.lockTimeout = + object.lockTimeout !== undefined && object.lockTimeout !== null + ? Number(object.lockTimeout) + : undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout !== undefined && + object.idleInTransactionSessionTimeout !== null + ? Number(object.idleInTransactionSessionTimeout) + : undefined; + message.byteaOutput = + object.byteaOutput !== undefined && object.byteaOutput !== null + ? postgresqlConfig14_ByteaOutputFromJSON(object.byteaOutput) + : 0; + message.xmlbinary = + object.xmlbinary !== undefined && object.xmlbinary !== null + ? postgresqlConfig14_XmlBinaryFromJSON(object.xmlbinary) + : 0; + message.xmloption = + object.xmloption !== undefined && object.xmloption !== null + ? postgresqlConfig14_XmlOptionFromJSON(object.xmloption) + : 0; + message.ginPendingListLimit = + object.ginPendingListLimit !== undefined && + object.ginPendingListLimit !== null + ? Number(object.ginPendingListLimit) + : undefined; + message.deadlockTimeout = + object.deadlockTimeout !== undefined && object.deadlockTimeout !== null + ? 
Number(object.deadlockTimeout) + : undefined; + message.maxLocksPerTransaction = + object.maxLocksPerTransaction !== undefined && + object.maxLocksPerTransaction !== null + ? Number(object.maxLocksPerTransaction) + : undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction !== undefined && + object.maxPredLocksPerTransaction !== null + ? Number(object.maxPredLocksPerTransaction) + : undefined; + message.arrayNulls = + object.arrayNulls !== undefined && object.arrayNulls !== null + ? Boolean(object.arrayNulls) + : undefined; + message.backslashQuote = + object.backslashQuote !== undefined && object.backslashQuote !== null + ? postgresqlConfig14_BackslashQuoteFromJSON(object.backslashQuote) + : 0; + message.defaultWithOids = + object.defaultWithOids !== undefined && object.defaultWithOids !== null + ? Boolean(object.defaultWithOids) + : undefined; + message.escapeStringWarning = + object.escapeStringWarning !== undefined && + object.escapeStringWarning !== null + ? Boolean(object.escapeStringWarning) + : undefined; + message.loCompatPrivileges = + object.loCompatPrivileges !== undefined && + object.loCompatPrivileges !== null + ? Boolean(object.loCompatPrivileges) + : undefined; + message.quoteAllIdentifiers = + object.quoteAllIdentifiers !== undefined && + object.quoteAllIdentifiers !== null + ? Boolean(object.quoteAllIdentifiers) + : undefined; + message.standardConformingStrings = + object.standardConformingStrings !== undefined && + object.standardConformingStrings !== null + ? Boolean(object.standardConformingStrings) + : undefined; + message.synchronizeSeqscans = + object.synchronizeSeqscans !== undefined && + object.synchronizeSeqscans !== null + ? Boolean(object.synchronizeSeqscans) + : undefined; + message.transformNullEquals = + object.transformNullEquals !== undefined && + object.transformNullEquals !== null + ? 
Boolean(object.transformNullEquals) + : undefined; + message.exitOnError = + object.exitOnError !== undefined && object.exitOnError !== null + ? Boolean(object.exitOnError) + : undefined; + message.seqPageCost = + object.seqPageCost !== undefined && object.seqPageCost !== null + ? Number(object.seqPageCost) + : undefined; + message.randomPageCost = + object.randomPageCost !== undefined && object.randomPageCost !== null + ? Number(object.randomPageCost) + : undefined; + message.autovacuumMaxWorkers = + object.autovacuumMaxWorkers !== undefined && + object.autovacuumMaxWorkers !== null + ? Number(object.autovacuumMaxWorkers) + : undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay !== undefined && + object.autovacuumVacuumCostDelay !== null + ? Number(object.autovacuumVacuumCostDelay) + : undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit !== undefined && + object.autovacuumVacuumCostLimit !== null + ? Number(object.autovacuumVacuumCostLimit) + : undefined; + message.autovacuumNaptime = + object.autovacuumNaptime !== undefined && + object.autovacuumNaptime !== null + ? Number(object.autovacuumNaptime) + : undefined; + message.archiveTimeout = + object.archiveTimeout !== undefined && object.archiveTimeout !== null + ? Number(object.archiveTimeout) + : undefined; + message.trackActivityQuerySize = + object.trackActivityQuerySize !== undefined && + object.trackActivityQuerySize !== null + ? Number(object.trackActivityQuerySize) + : undefined; + message.enableBitmapscan = + object.enableBitmapscan !== undefined && object.enableBitmapscan !== null + ? Boolean(object.enableBitmapscan) + : undefined; + message.enableHashagg = + object.enableHashagg !== undefined && object.enableHashagg !== null + ? Boolean(object.enableHashagg) + : undefined; + message.enableHashjoin = + object.enableHashjoin !== undefined && object.enableHashjoin !== null + ? 
Boolean(object.enableHashjoin) + : undefined; + message.enableIndexscan = + object.enableIndexscan !== undefined && object.enableIndexscan !== null + ? Boolean(object.enableIndexscan) + : undefined; + message.enableIndexonlyscan = + object.enableIndexonlyscan !== undefined && + object.enableIndexonlyscan !== null + ? Boolean(object.enableIndexonlyscan) + : undefined; + message.enableMaterial = + object.enableMaterial !== undefined && object.enableMaterial !== null + ? Boolean(object.enableMaterial) + : undefined; + message.enableMergejoin = + object.enableMergejoin !== undefined && object.enableMergejoin !== null + ? Boolean(object.enableMergejoin) + : undefined; + message.enableNestloop = + object.enableNestloop !== undefined && object.enableNestloop !== null + ? Boolean(object.enableNestloop) + : undefined; + message.enableSeqscan = + object.enableSeqscan !== undefined && object.enableSeqscan !== null + ? Boolean(object.enableSeqscan) + : undefined; + message.enableSort = + object.enableSort !== undefined && object.enableSort !== null + ? Boolean(object.enableSort) + : undefined; + message.enableTidscan = + object.enableTidscan !== undefined && object.enableTidscan !== null + ? Boolean(object.enableTidscan) + : undefined; + message.maxWorkerProcesses = + object.maxWorkerProcesses !== undefined && + object.maxWorkerProcesses !== null + ? Number(object.maxWorkerProcesses) + : undefined; + message.maxParallelWorkers = + object.maxParallelWorkers !== undefined && + object.maxParallelWorkers !== null + ? Number(object.maxParallelWorkers) + : undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather !== undefined && + object.maxParallelWorkersPerGather !== null + ? Number(object.maxParallelWorkersPerGather) + : undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor !== undefined && + object.autovacuumVacuumScaleFactor !== null + ? 
Number(object.autovacuumVacuumScaleFactor) + : undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor !== undefined && + object.autovacuumAnalyzeScaleFactor !== null + ? Number(object.autovacuumAnalyzeScaleFactor) + : undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly !== undefined && + object.defaultTransactionReadOnly !== null + ? Boolean(object.defaultTransactionReadOnly) + : undefined; + message.timezone = + object.timezone !== undefined && object.timezone !== null + ? String(object.timezone) + : ""; + message.enableParallelAppend = + object.enableParallelAppend !== undefined && + object.enableParallelAppend !== null + ? Boolean(object.enableParallelAppend) + : undefined; + message.enableParallelHash = + object.enableParallelHash !== undefined && + object.enableParallelHash !== null + ? Boolean(object.enableParallelHash) + : undefined; + message.enablePartitionPruning = + object.enablePartitionPruning !== undefined && + object.enablePartitionPruning !== null + ? Boolean(object.enablePartitionPruning) + : undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate !== undefined && + object.enablePartitionwiseAggregate !== null + ? Boolean(object.enablePartitionwiseAggregate) + : undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin !== undefined && + object.enablePartitionwiseJoin !== null + ? Boolean(object.enablePartitionwiseJoin) + : undefined; + message.jit = + object.jit !== undefined && object.jit !== null + ? Boolean(object.jit) + : undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers !== undefined && + object.maxParallelMaintenanceWorkers !== null + ? Number(object.maxParallelMaintenanceWorkers) + : undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation !== undefined && + object.parallelLeaderParticipation !== null + ? 
Boolean(object.parallelLeaderParticipation) + : undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate !== undefined && + object.logTransactionSampleRate !== null + ? Number(object.logTransactionSampleRate) + : undefined; + message.planCacheMode = + object.planCacheMode !== undefined && object.planCacheMode !== null + ? postgresqlConfig14_PlanCacheModeFromJSON(object.planCacheMode) + : 0; + message.effectiveIoConcurrency = + object.effectiveIoConcurrency !== undefined && + object.effectiveIoConcurrency !== null + ? Number(object.effectiveIoConcurrency) + : undefined; + message.effectiveCacheSize = + object.effectiveCacheSize !== undefined && + object.effectiveCacheSize !== null + ? Number(object.effectiveCacheSize) + : undefined; + message.sharedPreloadLibraries = (object.sharedPreloadLibraries ?? []).map( + (e: any) => postgresqlConfig14_SharedPreloadLibrariesFromJSON(e) + ); + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration !== undefined && + object.autoExplainLogMinDuration !== null + ? Number(object.autoExplainLogMinDuration) + : undefined; + message.autoExplainLogAnalyze = + object.autoExplainLogAnalyze !== undefined && + object.autoExplainLogAnalyze !== null + ? Boolean(object.autoExplainLogAnalyze) + : undefined; + message.autoExplainLogBuffers = + object.autoExplainLogBuffers !== undefined && + object.autoExplainLogBuffers !== null + ? Boolean(object.autoExplainLogBuffers) + : undefined; + message.autoExplainLogTiming = + object.autoExplainLogTiming !== undefined && + object.autoExplainLogTiming !== null + ? Boolean(object.autoExplainLogTiming) + : undefined; + message.autoExplainLogTriggers = + object.autoExplainLogTriggers !== undefined && + object.autoExplainLogTriggers !== null + ? Boolean(object.autoExplainLogTriggers) + : undefined; + message.autoExplainLogVerbose = + object.autoExplainLogVerbose !== undefined && + object.autoExplainLogVerbose !== null + ? 
Boolean(object.autoExplainLogVerbose) + : undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements !== undefined && + object.autoExplainLogNestedStatements !== null + ? Boolean(object.autoExplainLogNestedStatements) + : undefined; + message.autoExplainSampleRate = + object.autoExplainSampleRate !== undefined && + object.autoExplainSampleRate !== null + ? Number(object.autoExplainSampleRate) + : undefined; + message.pgHintPlanEnableHint = + object.pgHintPlanEnableHint !== undefined && + object.pgHintPlanEnableHint !== null + ? Boolean(object.pgHintPlanEnableHint) + : undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable !== undefined && + object.pgHintPlanEnableHintTable !== null + ? Boolean(object.pgHintPlanEnableHintTable) + : undefined; + message.pgHintPlanDebugPrint = + object.pgHintPlanDebugPrint !== undefined && + object.pgHintPlanDebugPrint !== null + ? postgresqlConfig14_PgHintPlanDebugPrintFromJSON( + object.pgHintPlanDebugPrint + ) + : 0; + message.pgHintPlanMessageLevel = + object.pgHintPlanMessageLevel !== undefined && + object.pgHintPlanMessageLevel !== null + ? postgresqlConfig14_LogLevelFromJSON(object.pgHintPlanMessageLevel) + : 0; + message.hashMemMultiplier = + object.hashMemMultiplier !== undefined && + object.hashMemMultiplier !== null + ? Number(object.hashMemMultiplier) + : undefined; + message.logicalDecodingWorkMem = + object.logicalDecodingWorkMem !== undefined && + object.logicalDecodingWorkMem !== null + ? Number(object.logicalDecodingWorkMem) + : undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency !== undefined && + object.maintenanceIoConcurrency !== null + ? Number(object.maintenanceIoConcurrency) + : undefined; + message.maxSlotWalKeepSize = + object.maxSlotWalKeepSize !== undefined && + object.maxSlotWalKeepSize !== null + ? 
Number(object.maxSlotWalKeepSize) + : undefined; + message.walKeepSize = + object.walKeepSize !== undefined && object.walKeepSize !== null + ? Number(object.walKeepSize) + : undefined; + message.enableIncrementalSort = + object.enableIncrementalSort !== undefined && + object.enableIncrementalSort !== null + ? Boolean(object.enableIncrementalSort) + : undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold !== undefined && + object.autovacuumVacuumInsertThreshold !== null + ? Number(object.autovacuumVacuumInsertThreshold) + : undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor !== undefined && + object.autovacuumVacuumInsertScaleFactor !== null + ? Number(object.autovacuumVacuumInsertScaleFactor) + : undefined; + message.logMinDurationSample = + object.logMinDurationSample !== undefined && + object.logMinDurationSample !== null + ? Number(object.logMinDurationSample) + : undefined; + message.logStatementSampleRate = + object.logStatementSampleRate !== undefined && + object.logStatementSampleRate !== null + ? Number(object.logStatementSampleRate) + : undefined; + message.logParameterMaxLength = + object.logParameterMaxLength !== undefined && + object.logParameterMaxLength !== null + ? Number(object.logParameterMaxLength) + : undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError !== undefined && + object.logParameterMaxLengthOnError !== null + ? Number(object.logParameterMaxLengthOnError) + : undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval !== undefined && + object.clientConnectionCheckInterval !== null + ? Number(object.clientConnectionCheckInterval) + : undefined; + message.enableAsyncAppend = + object.enableAsyncAppend !== undefined && + object.enableAsyncAppend !== null + ? 
Boolean(object.enableAsyncAppend) + : undefined; + message.enableGathermerge = + object.enableGathermerge !== undefined && + object.enableGathermerge !== null + ? Boolean(object.enableGathermerge) + : undefined; + message.enableMemoize = + object.enableMemoize !== undefined && object.enableMemoize !== null + ? Boolean(object.enableMemoize) + : undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits !== undefined && + object.logRecoveryConflictWaits !== null + ? Boolean(object.logRecoveryConflictWaits) + : undefined; + message.vacuumFailsafeAge = + object.vacuumFailsafeAge !== undefined && + object.vacuumFailsafeAge !== null + ? Number(object.vacuumFailsafeAge) + : undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge !== undefined && + object.vacuumMultixactFailsafeAge !== null + ? Number(object.vacuumMultixactFailsafeAge) + : undefined; + message.pgQualstatsEnabled = + object.pgQualstatsEnabled !== undefined && + object.pgQualstatsEnabled !== null + ? Boolean(object.pgQualstatsEnabled) + : undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants !== undefined && + object.pgQualstatsTrackConstants !== null + ? Boolean(object.pgQualstatsTrackConstants) + : undefined; + message.pgQualstatsMax = + object.pgQualstatsMax !== undefined && object.pgQualstatsMax !== null + ? Number(object.pgQualstatsMax) + : undefined; + message.pgQualstatsResolveOids = + object.pgQualstatsResolveOids !== undefined && + object.pgQualstatsResolveOids !== null + ? Boolean(object.pgQualstatsResolveOids) + : undefined; + message.pgQualstatsSampleRate = + object.pgQualstatsSampleRate !== undefined && + object.pgQualstatsSampleRate !== null + ? 
Number(object.pgQualstatsSampleRate) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfig14): unknown { + const obj: any = {}; + message.maxConnections !== undefined && + (obj.maxConnections = message.maxConnections); + message.sharedBuffers !== undefined && + (obj.sharedBuffers = message.sharedBuffers); + message.tempBuffers !== undefined && + (obj.tempBuffers = message.tempBuffers); + message.maxPreparedTransactions !== undefined && + (obj.maxPreparedTransactions = message.maxPreparedTransactions); + message.workMem !== undefined && (obj.workMem = message.workMem); + message.maintenanceWorkMem !== undefined && + (obj.maintenanceWorkMem = message.maintenanceWorkMem); + message.autovacuumWorkMem !== undefined && + (obj.autovacuumWorkMem = message.autovacuumWorkMem); + message.tempFileLimit !== undefined && + (obj.tempFileLimit = message.tempFileLimit); + message.vacuumCostDelay !== undefined && + (obj.vacuumCostDelay = message.vacuumCostDelay); + message.vacuumCostPageHit !== undefined && + (obj.vacuumCostPageHit = message.vacuumCostPageHit); + message.vacuumCostPageMiss !== undefined && + (obj.vacuumCostPageMiss = message.vacuumCostPageMiss); + message.vacuumCostPageDirty !== undefined && + (obj.vacuumCostPageDirty = message.vacuumCostPageDirty); + message.vacuumCostLimit !== undefined && + (obj.vacuumCostLimit = message.vacuumCostLimit); + message.bgwriterDelay !== undefined && + (obj.bgwriterDelay = message.bgwriterDelay); + message.bgwriterLruMaxpages !== undefined && + (obj.bgwriterLruMaxpages = message.bgwriterLruMaxpages); + message.bgwriterLruMultiplier !== undefined && + (obj.bgwriterLruMultiplier = message.bgwriterLruMultiplier); + message.bgwriterFlushAfter !== undefined && + (obj.bgwriterFlushAfter = message.bgwriterFlushAfter); + message.backendFlushAfter !== undefined && + (obj.backendFlushAfter = message.backendFlushAfter); + message.oldSnapshotThreshold !== undefined && + (obj.oldSnapshotThreshold = 
message.oldSnapshotThreshold); + message.walLevel !== undefined && + (obj.walLevel = postgresqlConfig14_WalLevelToJSON(message.walLevel)); + message.synchronousCommit !== undefined && + (obj.synchronousCommit = postgresqlConfig14_SynchronousCommitToJSON( + message.synchronousCommit + )); + message.checkpointTimeout !== undefined && + (obj.checkpointTimeout = message.checkpointTimeout); + message.checkpointCompletionTarget !== undefined && + (obj.checkpointCompletionTarget = message.checkpointCompletionTarget); + message.checkpointFlushAfter !== undefined && + (obj.checkpointFlushAfter = message.checkpointFlushAfter); + message.maxWalSize !== undefined && (obj.maxWalSize = message.maxWalSize); + message.minWalSize !== undefined && (obj.minWalSize = message.minWalSize); + message.maxStandbyStreamingDelay !== undefined && + (obj.maxStandbyStreamingDelay = message.maxStandbyStreamingDelay); + message.defaultStatisticsTarget !== undefined && + (obj.defaultStatisticsTarget = message.defaultStatisticsTarget); + message.constraintExclusion !== undefined && + (obj.constraintExclusion = postgresqlConfig14_ConstraintExclusionToJSON( + message.constraintExclusion + )); + message.cursorTupleFraction !== undefined && + (obj.cursorTupleFraction = message.cursorTupleFraction); + message.fromCollapseLimit !== undefined && + (obj.fromCollapseLimit = message.fromCollapseLimit); + message.joinCollapseLimit !== undefined && + (obj.joinCollapseLimit = message.joinCollapseLimit); + message.forceParallelMode !== undefined && + (obj.forceParallelMode = postgresqlConfig14_ForceParallelModeToJSON( + message.forceParallelMode + )); + message.clientMinMessages !== undefined && + (obj.clientMinMessages = postgresqlConfig14_LogLevelToJSON( + message.clientMinMessages + )); + message.logMinMessages !== undefined && + (obj.logMinMessages = postgresqlConfig14_LogLevelToJSON( + message.logMinMessages + )); + message.logMinErrorStatement !== undefined && + (obj.logMinErrorStatement = 
postgresqlConfig14_LogLevelToJSON( + message.logMinErrorStatement + )); + message.logMinDurationStatement !== undefined && + (obj.logMinDurationStatement = message.logMinDurationStatement); + message.logCheckpoints !== undefined && + (obj.logCheckpoints = message.logCheckpoints); + message.logConnections !== undefined && + (obj.logConnections = message.logConnections); + message.logDisconnections !== undefined && + (obj.logDisconnections = message.logDisconnections); + message.logDuration !== undefined && + (obj.logDuration = message.logDuration); + message.logErrorVerbosity !== undefined && + (obj.logErrorVerbosity = postgresqlConfig14_LogErrorVerbosityToJSON( + message.logErrorVerbosity + )); + message.logLockWaits !== undefined && + (obj.logLockWaits = message.logLockWaits); + message.logStatement !== undefined && + (obj.logStatement = postgresqlConfig14_LogStatementToJSON( + message.logStatement + )); + message.logTempFiles !== undefined && + (obj.logTempFiles = message.logTempFiles); + message.searchPath !== undefined && (obj.searchPath = message.searchPath); + message.rowSecurity !== undefined && + (obj.rowSecurity = message.rowSecurity); + message.defaultTransactionIsolation !== undefined && + (obj.defaultTransactionIsolation = + postgresqlConfig14_TransactionIsolationToJSON( + message.defaultTransactionIsolation + )); + message.statementTimeout !== undefined && + (obj.statementTimeout = message.statementTimeout); + message.lockTimeout !== undefined && + (obj.lockTimeout = message.lockTimeout); + message.idleInTransactionSessionTimeout !== undefined && + (obj.idleInTransactionSessionTimeout = + message.idleInTransactionSessionTimeout); + message.byteaOutput !== undefined && + (obj.byteaOutput = postgresqlConfig14_ByteaOutputToJSON( + message.byteaOutput + )); + message.xmlbinary !== undefined && + (obj.xmlbinary = postgresqlConfig14_XmlBinaryToJSON(message.xmlbinary)); + message.xmloption !== undefined && + (obj.xmloption = 
postgresqlConfig14_XmlOptionToJSON(message.xmloption)); + message.ginPendingListLimit !== undefined && + (obj.ginPendingListLimit = message.ginPendingListLimit); + message.deadlockTimeout !== undefined && + (obj.deadlockTimeout = message.deadlockTimeout); + message.maxLocksPerTransaction !== undefined && + (obj.maxLocksPerTransaction = message.maxLocksPerTransaction); + message.maxPredLocksPerTransaction !== undefined && + (obj.maxPredLocksPerTransaction = message.maxPredLocksPerTransaction); + message.arrayNulls !== undefined && (obj.arrayNulls = message.arrayNulls); + message.backslashQuote !== undefined && + (obj.backslashQuote = postgresqlConfig14_BackslashQuoteToJSON( + message.backslashQuote + )); + message.defaultWithOids !== undefined && + (obj.defaultWithOids = message.defaultWithOids); + message.escapeStringWarning !== undefined && + (obj.escapeStringWarning = message.escapeStringWarning); + message.loCompatPrivileges !== undefined && + (obj.loCompatPrivileges = message.loCompatPrivileges); + message.quoteAllIdentifiers !== undefined && + (obj.quoteAllIdentifiers = message.quoteAllIdentifiers); + message.standardConformingStrings !== undefined && + (obj.standardConformingStrings = message.standardConformingStrings); + message.synchronizeSeqscans !== undefined && + (obj.synchronizeSeqscans = message.synchronizeSeqscans); + message.transformNullEquals !== undefined && + (obj.transformNullEquals = message.transformNullEquals); + message.exitOnError !== undefined && + (obj.exitOnError = message.exitOnError); + message.seqPageCost !== undefined && + (obj.seqPageCost = message.seqPageCost); + message.randomPageCost !== undefined && + (obj.randomPageCost = message.randomPageCost); + message.autovacuumMaxWorkers !== undefined && + (obj.autovacuumMaxWorkers = message.autovacuumMaxWorkers); + message.autovacuumVacuumCostDelay !== undefined && + (obj.autovacuumVacuumCostDelay = message.autovacuumVacuumCostDelay); + message.autovacuumVacuumCostLimit !== undefined && 
+ (obj.autovacuumVacuumCostLimit = message.autovacuumVacuumCostLimit); + message.autovacuumNaptime !== undefined && + (obj.autovacuumNaptime = message.autovacuumNaptime); + message.archiveTimeout !== undefined && + (obj.archiveTimeout = message.archiveTimeout); + message.trackActivityQuerySize !== undefined && + (obj.trackActivityQuerySize = message.trackActivityQuerySize); + message.enableBitmapscan !== undefined && + (obj.enableBitmapscan = message.enableBitmapscan); + message.enableHashagg !== undefined && + (obj.enableHashagg = message.enableHashagg); + message.enableHashjoin !== undefined && + (obj.enableHashjoin = message.enableHashjoin); + message.enableIndexscan !== undefined && + (obj.enableIndexscan = message.enableIndexscan); + message.enableIndexonlyscan !== undefined && + (obj.enableIndexonlyscan = message.enableIndexonlyscan); + message.enableMaterial !== undefined && + (obj.enableMaterial = message.enableMaterial); + message.enableMergejoin !== undefined && + (obj.enableMergejoin = message.enableMergejoin); + message.enableNestloop !== undefined && + (obj.enableNestloop = message.enableNestloop); + message.enableSeqscan !== undefined && + (obj.enableSeqscan = message.enableSeqscan); + message.enableSort !== undefined && (obj.enableSort = message.enableSort); + message.enableTidscan !== undefined && + (obj.enableTidscan = message.enableTidscan); + message.maxWorkerProcesses !== undefined && + (obj.maxWorkerProcesses = message.maxWorkerProcesses); + message.maxParallelWorkers !== undefined && + (obj.maxParallelWorkers = message.maxParallelWorkers); + message.maxParallelWorkersPerGather !== undefined && + (obj.maxParallelWorkersPerGather = message.maxParallelWorkersPerGather); + message.autovacuumVacuumScaleFactor !== undefined && + (obj.autovacuumVacuumScaleFactor = message.autovacuumVacuumScaleFactor); + message.autovacuumAnalyzeScaleFactor !== undefined && + (obj.autovacuumAnalyzeScaleFactor = message.autovacuumAnalyzeScaleFactor); + 
message.defaultTransactionReadOnly !== undefined && + (obj.defaultTransactionReadOnly = message.defaultTransactionReadOnly); + message.timezone !== undefined && (obj.timezone = message.timezone); + message.enableParallelAppend !== undefined && + (obj.enableParallelAppend = message.enableParallelAppend); + message.enableParallelHash !== undefined && + (obj.enableParallelHash = message.enableParallelHash); + message.enablePartitionPruning !== undefined && + (obj.enablePartitionPruning = message.enablePartitionPruning); + message.enablePartitionwiseAggregate !== undefined && + (obj.enablePartitionwiseAggregate = message.enablePartitionwiseAggregate); + message.enablePartitionwiseJoin !== undefined && + (obj.enablePartitionwiseJoin = message.enablePartitionwiseJoin); + message.jit !== undefined && (obj.jit = message.jit); + message.maxParallelMaintenanceWorkers !== undefined && + (obj.maxParallelMaintenanceWorkers = + message.maxParallelMaintenanceWorkers); + message.parallelLeaderParticipation !== undefined && + (obj.parallelLeaderParticipation = message.parallelLeaderParticipation); + message.logTransactionSampleRate !== undefined && + (obj.logTransactionSampleRate = message.logTransactionSampleRate); + message.planCacheMode !== undefined && + (obj.planCacheMode = postgresqlConfig14_PlanCacheModeToJSON( + message.planCacheMode + )); + message.effectiveIoConcurrency !== undefined && + (obj.effectiveIoConcurrency = message.effectiveIoConcurrency); + message.effectiveCacheSize !== undefined && + (obj.effectiveCacheSize = message.effectiveCacheSize); + if (message.sharedPreloadLibraries) { + obj.sharedPreloadLibraries = message.sharedPreloadLibraries.map((e) => + postgresqlConfig14_SharedPreloadLibrariesToJSON(e) + ); + } else { + obj.sharedPreloadLibraries = []; + } + message.autoExplainLogMinDuration !== undefined && + (obj.autoExplainLogMinDuration = message.autoExplainLogMinDuration); + message.autoExplainLogAnalyze !== undefined && + (obj.autoExplainLogAnalyze = 
message.autoExplainLogAnalyze); + message.autoExplainLogBuffers !== undefined && + (obj.autoExplainLogBuffers = message.autoExplainLogBuffers); + message.autoExplainLogTiming !== undefined && + (obj.autoExplainLogTiming = message.autoExplainLogTiming); + message.autoExplainLogTriggers !== undefined && + (obj.autoExplainLogTriggers = message.autoExplainLogTriggers); + message.autoExplainLogVerbose !== undefined && + (obj.autoExplainLogVerbose = message.autoExplainLogVerbose); + message.autoExplainLogNestedStatements !== undefined && + (obj.autoExplainLogNestedStatements = + message.autoExplainLogNestedStatements); + message.autoExplainSampleRate !== undefined && + (obj.autoExplainSampleRate = message.autoExplainSampleRate); + message.pgHintPlanEnableHint !== undefined && + (obj.pgHintPlanEnableHint = message.pgHintPlanEnableHint); + message.pgHintPlanEnableHintTable !== undefined && + (obj.pgHintPlanEnableHintTable = message.pgHintPlanEnableHintTable); + message.pgHintPlanDebugPrint !== undefined && + (obj.pgHintPlanDebugPrint = postgresqlConfig14_PgHintPlanDebugPrintToJSON( + message.pgHintPlanDebugPrint + )); + message.pgHintPlanMessageLevel !== undefined && + (obj.pgHintPlanMessageLevel = postgresqlConfig14_LogLevelToJSON( + message.pgHintPlanMessageLevel + )); + message.hashMemMultiplier !== undefined && + (obj.hashMemMultiplier = message.hashMemMultiplier); + message.logicalDecodingWorkMem !== undefined && + (obj.logicalDecodingWorkMem = message.logicalDecodingWorkMem); + message.maintenanceIoConcurrency !== undefined && + (obj.maintenanceIoConcurrency = message.maintenanceIoConcurrency); + message.maxSlotWalKeepSize !== undefined && + (obj.maxSlotWalKeepSize = message.maxSlotWalKeepSize); + message.walKeepSize !== undefined && + (obj.walKeepSize = message.walKeepSize); + message.enableIncrementalSort !== undefined && + (obj.enableIncrementalSort = message.enableIncrementalSort); + message.autovacuumVacuumInsertThreshold !== undefined && + 
(obj.autovacuumVacuumInsertThreshold = + message.autovacuumVacuumInsertThreshold); + message.autovacuumVacuumInsertScaleFactor !== undefined && + (obj.autovacuumVacuumInsertScaleFactor = + message.autovacuumVacuumInsertScaleFactor); + message.logMinDurationSample !== undefined && + (obj.logMinDurationSample = message.logMinDurationSample); + message.logStatementSampleRate !== undefined && + (obj.logStatementSampleRate = message.logStatementSampleRate); + message.logParameterMaxLength !== undefined && + (obj.logParameterMaxLength = message.logParameterMaxLength); + message.logParameterMaxLengthOnError !== undefined && + (obj.logParameterMaxLengthOnError = message.logParameterMaxLengthOnError); + message.clientConnectionCheckInterval !== undefined && + (obj.clientConnectionCheckInterval = + message.clientConnectionCheckInterval); + message.enableAsyncAppend !== undefined && + (obj.enableAsyncAppend = message.enableAsyncAppend); + message.enableGathermerge !== undefined && + (obj.enableGathermerge = message.enableGathermerge); + message.enableMemoize !== undefined && + (obj.enableMemoize = message.enableMemoize); + message.logRecoveryConflictWaits !== undefined && + (obj.logRecoveryConflictWaits = message.logRecoveryConflictWaits); + message.vacuumFailsafeAge !== undefined && + (obj.vacuumFailsafeAge = message.vacuumFailsafeAge); + message.vacuumMultixactFailsafeAge !== undefined && + (obj.vacuumMultixactFailsafeAge = message.vacuumMultixactFailsafeAge); + message.pgQualstatsEnabled !== undefined && + (obj.pgQualstatsEnabled = message.pgQualstatsEnabled); + message.pgQualstatsTrackConstants !== undefined && + (obj.pgQualstatsTrackConstants = message.pgQualstatsTrackConstants); + message.pgQualstatsMax !== undefined && + (obj.pgQualstatsMax = message.pgQualstatsMax); + message.pgQualstatsResolveOids !== undefined && + (obj.pgQualstatsResolveOids = message.pgQualstatsResolveOids); + message.pgQualstatsSampleRate !== undefined && + (obj.pgQualstatsSampleRate = 
message.pgQualstatsSampleRate); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfig14 { + const message = { ...basePostgresqlConfig14 } as PostgresqlConfig14; + message.maxConnections = object.maxConnections ?? undefined; + message.sharedBuffers = object.sharedBuffers ?? undefined; + message.tempBuffers = object.tempBuffers ?? undefined; + message.maxPreparedTransactions = + object.maxPreparedTransactions ?? undefined; + message.workMem = object.workMem ?? undefined; + message.maintenanceWorkMem = object.maintenanceWorkMem ?? undefined; + message.autovacuumWorkMem = object.autovacuumWorkMem ?? undefined; + message.tempFileLimit = object.tempFileLimit ?? undefined; + message.vacuumCostDelay = object.vacuumCostDelay ?? undefined; + message.vacuumCostPageHit = object.vacuumCostPageHit ?? undefined; + message.vacuumCostPageMiss = object.vacuumCostPageMiss ?? undefined; + message.vacuumCostPageDirty = object.vacuumCostPageDirty ?? undefined; + message.vacuumCostLimit = object.vacuumCostLimit ?? undefined; + message.bgwriterDelay = object.bgwriterDelay ?? undefined; + message.bgwriterLruMaxpages = object.bgwriterLruMaxpages ?? undefined; + message.bgwriterLruMultiplier = object.bgwriterLruMultiplier ?? undefined; + message.bgwriterFlushAfter = object.bgwriterFlushAfter ?? undefined; + message.backendFlushAfter = object.backendFlushAfter ?? undefined; + message.oldSnapshotThreshold = object.oldSnapshotThreshold ?? undefined; + message.walLevel = object.walLevel ?? 0; + message.synchronousCommit = object.synchronousCommit ?? 0; + message.checkpointTimeout = object.checkpointTimeout ?? undefined; + message.checkpointCompletionTarget = + object.checkpointCompletionTarget ?? undefined; + message.checkpointFlushAfter = object.checkpointFlushAfter ?? undefined; + message.maxWalSize = object.maxWalSize ?? undefined; + message.minWalSize = object.minWalSize ?? undefined; + message.maxStandbyStreamingDelay = + object.maxStandbyStreamingDelay ?? 
undefined; + message.defaultStatisticsTarget = + object.defaultStatisticsTarget ?? undefined; + message.constraintExclusion = object.constraintExclusion ?? 0; + message.cursorTupleFraction = object.cursorTupleFraction ?? undefined; + message.fromCollapseLimit = object.fromCollapseLimit ?? undefined; + message.joinCollapseLimit = object.joinCollapseLimit ?? undefined; + message.forceParallelMode = object.forceParallelMode ?? 0; + message.clientMinMessages = object.clientMinMessages ?? 0; + message.logMinMessages = object.logMinMessages ?? 0; + message.logMinErrorStatement = object.logMinErrorStatement ?? 0; + message.logMinDurationStatement = + object.logMinDurationStatement ?? undefined; + message.logCheckpoints = object.logCheckpoints ?? undefined; + message.logConnections = object.logConnections ?? undefined; + message.logDisconnections = object.logDisconnections ?? undefined; + message.logDuration = object.logDuration ?? undefined; + message.logErrorVerbosity = object.logErrorVerbosity ?? 0; + message.logLockWaits = object.logLockWaits ?? undefined; + message.logStatement = object.logStatement ?? 0; + message.logTempFiles = object.logTempFiles ?? undefined; + message.searchPath = object.searchPath ?? ""; + message.rowSecurity = object.rowSecurity ?? undefined; + message.defaultTransactionIsolation = + object.defaultTransactionIsolation ?? 0; + message.statementTimeout = object.statementTimeout ?? undefined; + message.lockTimeout = object.lockTimeout ?? undefined; + message.idleInTransactionSessionTimeout = + object.idleInTransactionSessionTimeout ?? undefined; + message.byteaOutput = object.byteaOutput ?? 0; + message.xmlbinary = object.xmlbinary ?? 0; + message.xmloption = object.xmloption ?? 0; + message.ginPendingListLimit = object.ginPendingListLimit ?? undefined; + message.deadlockTimeout = object.deadlockTimeout ?? undefined; + message.maxLocksPerTransaction = object.maxLocksPerTransaction ?? 
undefined; + message.maxPredLocksPerTransaction = + object.maxPredLocksPerTransaction ?? undefined; + message.arrayNulls = object.arrayNulls ?? undefined; + message.backslashQuote = object.backslashQuote ?? 0; + message.defaultWithOids = object.defaultWithOids ?? undefined; + message.escapeStringWarning = object.escapeStringWarning ?? undefined; + message.loCompatPrivileges = object.loCompatPrivileges ?? undefined; + message.quoteAllIdentifiers = object.quoteAllIdentifiers ?? undefined; + message.standardConformingStrings = + object.standardConformingStrings ?? undefined; + message.synchronizeSeqscans = object.synchronizeSeqscans ?? undefined; + message.transformNullEquals = object.transformNullEquals ?? undefined; + message.exitOnError = object.exitOnError ?? undefined; + message.seqPageCost = object.seqPageCost ?? undefined; + message.randomPageCost = object.randomPageCost ?? undefined; + message.autovacuumMaxWorkers = object.autovacuumMaxWorkers ?? undefined; + message.autovacuumVacuumCostDelay = + object.autovacuumVacuumCostDelay ?? undefined; + message.autovacuumVacuumCostLimit = + object.autovacuumVacuumCostLimit ?? undefined; + message.autovacuumNaptime = object.autovacuumNaptime ?? undefined; + message.archiveTimeout = object.archiveTimeout ?? undefined; + message.trackActivityQuerySize = object.trackActivityQuerySize ?? undefined; + message.enableBitmapscan = object.enableBitmapscan ?? undefined; + message.enableHashagg = object.enableHashagg ?? undefined; + message.enableHashjoin = object.enableHashjoin ?? undefined; + message.enableIndexscan = object.enableIndexscan ?? undefined; + message.enableIndexonlyscan = object.enableIndexonlyscan ?? undefined; + message.enableMaterial = object.enableMaterial ?? undefined; + message.enableMergejoin = object.enableMergejoin ?? undefined; + message.enableNestloop = object.enableNestloop ?? undefined; + message.enableSeqscan = object.enableSeqscan ?? undefined; + message.enableSort = object.enableSort ?? 
undefined; + message.enableTidscan = object.enableTidscan ?? undefined; + message.maxWorkerProcesses = object.maxWorkerProcesses ?? undefined; + message.maxParallelWorkers = object.maxParallelWorkers ?? undefined; + message.maxParallelWorkersPerGather = + object.maxParallelWorkersPerGather ?? undefined; + message.autovacuumVacuumScaleFactor = + object.autovacuumVacuumScaleFactor ?? undefined; + message.autovacuumAnalyzeScaleFactor = + object.autovacuumAnalyzeScaleFactor ?? undefined; + message.defaultTransactionReadOnly = + object.defaultTransactionReadOnly ?? undefined; + message.timezone = object.timezone ?? ""; + message.enableParallelAppend = object.enableParallelAppend ?? undefined; + message.enableParallelHash = object.enableParallelHash ?? undefined; + message.enablePartitionPruning = object.enablePartitionPruning ?? undefined; + message.enablePartitionwiseAggregate = + object.enablePartitionwiseAggregate ?? undefined; + message.enablePartitionwiseJoin = + object.enablePartitionwiseJoin ?? undefined; + message.jit = object.jit ?? undefined; + message.maxParallelMaintenanceWorkers = + object.maxParallelMaintenanceWorkers ?? undefined; + message.parallelLeaderParticipation = + object.parallelLeaderParticipation ?? undefined; + message.logTransactionSampleRate = + object.logTransactionSampleRate ?? undefined; + message.planCacheMode = object.planCacheMode ?? 0; + message.effectiveIoConcurrency = object.effectiveIoConcurrency ?? undefined; + message.effectiveCacheSize = object.effectiveCacheSize ?? undefined; + message.sharedPreloadLibraries = + object.sharedPreloadLibraries?.map((e) => e) || []; + message.autoExplainLogMinDuration = + object.autoExplainLogMinDuration ?? undefined; + message.autoExplainLogAnalyze = object.autoExplainLogAnalyze ?? undefined; + message.autoExplainLogBuffers = object.autoExplainLogBuffers ?? undefined; + message.autoExplainLogTiming = object.autoExplainLogTiming ?? 
undefined; + message.autoExplainLogTriggers = object.autoExplainLogTriggers ?? undefined; + message.autoExplainLogVerbose = object.autoExplainLogVerbose ?? undefined; + message.autoExplainLogNestedStatements = + object.autoExplainLogNestedStatements ?? undefined; + message.autoExplainSampleRate = object.autoExplainSampleRate ?? undefined; + message.pgHintPlanEnableHint = object.pgHintPlanEnableHint ?? undefined; + message.pgHintPlanEnableHintTable = + object.pgHintPlanEnableHintTable ?? undefined; + message.pgHintPlanDebugPrint = object.pgHintPlanDebugPrint ?? 0; + message.pgHintPlanMessageLevel = object.pgHintPlanMessageLevel ?? 0; + message.hashMemMultiplier = object.hashMemMultiplier ?? undefined; + message.logicalDecodingWorkMem = object.logicalDecodingWorkMem ?? undefined; + message.maintenanceIoConcurrency = + object.maintenanceIoConcurrency ?? undefined; + message.maxSlotWalKeepSize = object.maxSlotWalKeepSize ?? undefined; + message.walKeepSize = object.walKeepSize ?? undefined; + message.enableIncrementalSort = object.enableIncrementalSort ?? undefined; + message.autovacuumVacuumInsertThreshold = + object.autovacuumVacuumInsertThreshold ?? undefined; + message.autovacuumVacuumInsertScaleFactor = + object.autovacuumVacuumInsertScaleFactor ?? undefined; + message.logMinDurationSample = object.logMinDurationSample ?? undefined; + message.logStatementSampleRate = object.logStatementSampleRate ?? undefined; + message.logParameterMaxLength = object.logParameterMaxLength ?? undefined; + message.logParameterMaxLengthOnError = + object.logParameterMaxLengthOnError ?? undefined; + message.clientConnectionCheckInterval = + object.clientConnectionCheckInterval ?? undefined; + message.enableAsyncAppend = object.enableAsyncAppend ?? undefined; + message.enableGathermerge = object.enableGathermerge ?? undefined; + message.enableMemoize = object.enableMemoize ?? undefined; + message.logRecoveryConflictWaits = + object.logRecoveryConflictWaits ?? 
undefined; + message.vacuumFailsafeAge = object.vacuumFailsafeAge ?? undefined; + message.vacuumMultixactFailsafeAge = + object.vacuumMultixactFailsafeAge ?? undefined; + message.pgQualstatsEnabled = object.pgQualstatsEnabled ?? undefined; + message.pgQualstatsTrackConstants = + object.pgQualstatsTrackConstants ?? undefined; + message.pgQualstatsMax = object.pgQualstatsMax ?? undefined; + message.pgQualstatsResolveOids = object.pgQualstatsResolveOids ?? undefined; + message.pgQualstatsSampleRate = object.pgQualstatsSampleRate ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfig14.$type, PostgresqlConfig14); + +const basePostgresqlConfigSet14: object = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14", +}; + +export const PostgresqlConfigSet14 = { + $type: "yandex.cloud.mdb.postgresql.v1.config.PostgresqlConfigSet14" as const, + + encode( + message: PostgresqlConfigSet14, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.effectiveConfig !== undefined) { + PostgresqlConfig14.encode( + message.effectiveConfig, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.userConfig !== undefined) { + PostgresqlConfig14.encode( + message.userConfig, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.defaultConfig !== undefined) { + PostgresqlConfig14.encode( + message.defaultConfig, + writer.uint32(26).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): PostgresqlConfigSet14 { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...basePostgresqlConfigSet14 } as PostgresqlConfigSet14; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.effectiveConfig = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; + case 2: + message.userConfig = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; + case 3: + message.defaultConfig = PostgresqlConfig14.decode( + reader, + reader.uint32() + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): PostgresqlConfigSet14 { + const message = { ...basePostgresqlConfigSet14 } as PostgresqlConfigSet14; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PostgresqlConfig14.fromJSON(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig14.fromJSON(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig14.fromJSON(object.defaultConfig) + : undefined; + return message; + }, + + toJSON(message: PostgresqlConfigSet14): unknown { + const obj: any = {}; + message.effectiveConfig !== undefined && + (obj.effectiveConfig = message.effectiveConfig + ? PostgresqlConfig14.toJSON(message.effectiveConfig) + : undefined); + message.userConfig !== undefined && + (obj.userConfig = message.userConfig + ? PostgresqlConfig14.toJSON(message.userConfig) + : undefined); + message.defaultConfig !== undefined && + (obj.defaultConfig = message.defaultConfig + ? 
PostgresqlConfig14.toJSON(message.defaultConfig) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): PostgresqlConfigSet14 { + const message = { ...basePostgresqlConfigSet14 } as PostgresqlConfigSet14; + message.effectiveConfig = + object.effectiveConfig !== undefined && object.effectiveConfig !== null + ? PostgresqlConfig14.fromPartial(object.effectiveConfig) + : undefined; + message.userConfig = + object.userConfig !== undefined && object.userConfig !== null + ? PostgresqlConfig14.fromPartial(object.userConfig) + : undefined; + message.defaultConfig = + object.defaultConfig !== undefined && object.defaultConfig !== null + ? PostgresqlConfig14.fromPartial(object.defaultConfig) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(PostgresqlConfigSet14.$type, PostgresqlConfigSet14); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Array + ? Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +type KeysOfUnion = T extends T ? keyof T : never; +export type Exact = P extends Builtin + ? 
P + : P & { [K in keyof P]: Exact } & Record< + Exclude | "$type">, + never + >; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts index 74573e49..02dee9be 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster.ts @@ -11,6 +11,7 @@ import { Timestamp } from "../../../../../google/protobuf/timestamp"; import { Redisconfigset50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { Redisconfigset60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; import { Redisconfigset62 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_2"; +import { Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.redis.v1"; @@ -64,6 +65,8 @@ export interface Cluster { tlsEnabled: boolean; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export enum Cluster_Environment { @@ -236,6 +239,44 @@ export function cluster_StatusToJSON(object: Cluster_Status): string { } } +export enum Cluster_PersistenceMode { + /** ON - cluster persistence mode on */ + ON = 0, + /** OFF - cluster persistence mode off */ + OFF = 1, + UNRECOGNIZED = -1, +} + +export function cluster_PersistenceModeFromJSON( + object: any +): Cluster_PersistenceMode { + switch (object) { + case 0: + case "ON": + return Cluster_PersistenceMode.ON; + case 1: + case "OFF": + return Cluster_PersistenceMode.OFF; + case -1: + case "UNRECOGNIZED": + default: + return Cluster_PersistenceMode.UNRECOGNIZED; + } +} + +export function cluster_PersistenceModeToJSON( + object: Cluster_PersistenceMode +): string { + switch (object) { + case Cluster_PersistenceMode.ON: + return "ON"; + case Cluster_PersistenceMode.OFF: + 
return "OFF"; + default: + return "UNKNOWN"; + } +} + export interface Cluster_LabelsEntry { $type: "yandex.cloud.mdb.redis.v1.Cluster.LabelsEntry"; key: string; @@ -287,7 +328,7 @@ export interface Host { * Name of the Redis host. The host name is assigned by MDB at creation time, and cannot be changed. * 1-63 characters long. * - * The name is unique across all existing MDB hosts in Yandex.Cloud, as it defines the FQDN of the host. + * The name is unique across all existing MDB hosts in Yandex Cloud, as it defines the FQDN of the host. */ name: string; /** ID of the Redis cluster. The ID is assigned by MDB at creation time. */ @@ -305,6 +346,14 @@ export interface Host { /** Services provided by the host. */ services: Service[]; shardName: string; + /** + * A replica with a low priority number is considered better for promotion. + * A replica with priority of 0 will never be selected by Redis Sentinel for promotion. + * Works only for non-sharded clusters. Default value is 100. + */ + replicaPriority?: number; + /** Flag showing public IP assignment status to this host. */ + assignPublicIp: boolean; } export enum Host_Role { @@ -530,6 +579,7 @@ const baseCluster: object = { securityGroupIds: "", tlsEnabled: false, deletionProtection: false, + persistenceMode: 0, }; export const Cluster = { @@ -609,6 +659,9 @@ export const Cluster = { if (message.deletionProtection === true) { writer.uint32(144).bool(message.deletionProtection); } + if (message.persistenceMode !== 0) { + writer.uint32(152).int32(message.persistenceMode); + } return writer; }, @@ -687,6 +740,9 @@ export const Cluster = { case 18: message.deletionProtection = reader.bool(); break; + case 19: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -769,6 +825,10 @@ export const Cluster = { object.deletionProtection !== null ? 
Boolean(object.deletionProtection) : false; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -822,6 +882,10 @@ export const Cluster = { message.tlsEnabled !== undefined && (obj.tlsEnabled = message.tlsEnabled); message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -863,6 +927,7 @@ export const Cluster = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? false; message.deletionProtection = object.deletionProtection ?? false; + message.persistenceMode = object.persistenceMode ?? 0; return message; }, }; @@ -1301,6 +1366,7 @@ const baseHost: object = { role: 0, health: 0, shardName: "", + assignPublicIp: false, }; export const Host = { @@ -1334,6 +1400,18 @@ export const Host = { if (message.shardName !== "") { writer.uint32(74).string(message.shardName); } + if (message.replicaPriority !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaPriority!, + }, + writer.uint32(82).fork() + ).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(88).bool(message.assignPublicIp); + } return writer; }, @@ -1372,6 +1450,15 @@ export const Host = { case 9: message.shardName = reader.string(); break; + case 10: + message.replicaPriority = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 11: + message.assignPublicIp = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1417,6 +1504,14 @@ export const Host = { object.shardName !== undefined && object.shardName !== null ? 
String(object.shardName) : ""; + message.replicaPriority = + object.replicaPriority !== undefined && object.replicaPriority !== null + ? Number(object.replicaPriority) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; return message; }, @@ -1441,6 +1536,10 @@ export const Host = { obj.services = []; } message.shardName !== undefined && (obj.shardName = message.shardName); + message.replicaPriority !== undefined && + (obj.replicaPriority = message.replicaPriority); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); return obj; }, @@ -1459,6 +1558,8 @@ export const Host = { message.services = object.services?.map((e) => Service.fromPartial(e)) || []; message.shardName = object.shardName ?? ""; + message.replicaPriority = object.replicaPriority ?? undefined; + message.assignPublicIp = object.assignPublicIp ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts index 14456c8d..2eb6d55e 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/cluster_service.ts @@ -18,13 +18,16 @@ import { import _m0 from "protobufjs/minimal"; import { Cluster_Environment, + Cluster_PersistenceMode, Resources, Access, Cluster, Host, Shard, cluster_EnvironmentFromJSON, + cluster_PersistenceModeFromJSON, cluster_EnvironmentToJSON, + cluster_PersistenceModeToJSON, } from "../../../../../yandex/cloud/mdb/redis/v1/cluster"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { MaintenanceWindow } from "../../../../../yandex/cloud/mdb/redis/v1/maintenance"; @@ -35,7 +38,7 @@ import { Backup } from "../../../../../yandex/cloud/mdb/redis/v1/backup"; import { Redisconfig50 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis5_0"; import { 
Redisconfig60 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_0"; import { Redisconfig62 } from "../../../../../yandex/cloud/mdb/redis/v1/config/redis6_2"; -import { BoolValue } from "../../../../../google/protobuf/wrappers"; +import { BoolValue, Int64Value } from "../../../../../google/protobuf/wrappers"; export const protobufPackage = "yandex.cloud.mdb.redis.v1"; @@ -118,6 +121,8 @@ export interface CreateClusterRequest { tlsEnabled?: boolean; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export interface CreateClusterRequest_LabelsEntry { @@ -161,6 +166,8 @@ export interface UpdateClusterRequest { securityGroupIds: string[]; /** Deletion Protection inhibits deletion of the cluster */ deletionProtection: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export interface UpdateClusterRequest_LabelsEntry { @@ -232,6 +239,25 @@ export interface MoveClusterMetadata { destinationFolderId: string; } +export interface UpdateClusterHostsRequest { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsRequest"; + /** + * ID of the Redis cluster to update hosts in. + * To get the Redis cluster ID, use a [ClusterService.List] request. + */ + clusterId: string; + /** New configurations to apply to hosts. */ + updateHostSpecs: UpdateHostSpec[]; +} + +export interface UpdateClusterHostsMetadata { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsMetadata"; + /** ID of the Redis cluster to update hosts in. */ + clusterId: string; + /** Names of hosts that are being updated. 
*/ + hostNames: string[]; +} + export interface BackupClusterRequest { $type: "yandex.cloud.mdb.redis.v1.BackupClusterRequest"; /** @@ -280,6 +306,8 @@ export interface RestoreClusterRequest { securityGroupIds: string[]; /** TLS port and functionality on\off */ tlsEnabled?: boolean; + /** Persistence mode */ + persistenceMode: Cluster_PersistenceMode; } export interface RestoreClusterRequest_LabelsEntry { @@ -806,6 +834,25 @@ export interface RebalanceClusterMetadata { clusterId: string; } +export interface UpdateHostSpec { + $type: "yandex.cloud.mdb.redis.v1.UpdateHostSpec"; + /** + * Name of the host to update. + * To get the Redis host name, use a [ClusterService.ListHosts] request. + */ + hostName: string; + /** + * A replica with a low priority number is considered better for promotion. + * A replica with priority of 0 will never be selected by Redis Sentinel for promotion. + * Works only for non-sharded clusters. Default value is 100. + */ + replicaPriority?: number; + /** Whether the host should get a public IP address on update. */ + assignPublicIp: boolean; + /** Field mask that specifies which fields of the Redis host should be updated. */ + updateMask?: FieldMask; +} + export interface HostSpec { $type: "yandex.cloud.mdb.redis.v1.HostSpec"; /** @@ -824,6 +871,20 @@ export interface HostSpec { * To get the shard ID use a [ClusterService.ListShards] request. */ shardName: string; + /** + * A replica with a low priority number is considered better for promotion. + * A replica with priority of 0 will never be selected by Redis Sentinel for promotion. + * Works only for non-sharded clusters. Default value is 100. + */ + replicaPriority?: number; + /** + * Whether the host should get a public IP address on creation. + * + * Possible values: + * * false - don't assign a public IP to the host. + * * true - the host should have a public IP address. 
+ */ + assignPublicIp: boolean; } export interface ConfigSpec { @@ -1100,6 +1161,7 @@ const baseCreateClusterRequest: object = { sharded: false, securityGroupIds: "", deletionProtection: false, + persistenceMode: 0, }; export const CreateClusterRequest = { @@ -1155,6 +1217,9 @@ export const CreateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(112).bool(message.deletionProtection); } + if (message.persistenceMode !== 0) { + writer.uint32(120).int32(message.persistenceMode); + } return writer; }, @@ -1213,6 +1278,9 @@ export const CreateClusterRequest = { case 14: message.deletionProtection = reader.bool(); break; + case 15: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -1272,6 +1340,10 @@ export const CreateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -1310,6 +1382,10 @@ export const CreateClusterRequest = { message.tlsEnabled !== undefined && (obj.tlsEnabled = message.tlsEnabled); message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -1340,6 +1416,7 @@ export const CreateClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? undefined; message.deletionProtection = object.deletionProtection ?? false; + message.persistenceMode = object.persistenceMode ?? 
0; return message; }, }; @@ -1503,6 +1580,7 @@ const baseUpdateClusterRequest: object = { name: "", securityGroupIds: "", deletionProtection: false, + persistenceMode: 0, }; export const UpdateClusterRequest = { @@ -1549,6 +1627,9 @@ export const UpdateClusterRequest = { if (message.deletionProtection === true) { writer.uint32(72).bool(message.deletionProtection); } + if (message.persistenceMode !== 0) { + writer.uint32(80).int32(message.persistenceMode); + } return writer; }, @@ -1600,6 +1681,9 @@ export const UpdateClusterRequest = { case 9: message.deletionProtection = reader.bool(); break; + case 10: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -1649,6 +1733,10 @@ export const UpdateClusterRequest = { object.deletionProtection !== null ? Boolean(object.deletionProtection) : false; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -1683,6 +1771,10 @@ export const UpdateClusterRequest = { } message.deletionProtection !== undefined && (obj.deletionProtection = message.deletionProtection); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -1716,6 +1808,7 @@ export const UpdateClusterRequest = { : undefined; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; + message.persistenceMode = object.persistenceMode ?? 
0; return message; }, }; @@ -2421,6 +2514,190 @@ export const MoveClusterMetadata = { messageTypeRegistry.set(MoveClusterMetadata.$type, MoveClusterMetadata); +const baseUpdateClusterHostsRequest: object = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsRequest", + clusterId: "", +}; + +export const UpdateClusterHostsRequest = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsRequest" as const, + + encode( + message: UpdateClusterHostsRequest, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.updateHostSpecs) { + UpdateHostSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsRequest { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.updateHostSpecs = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.updateHostSpecs.push( + UpdateHostSpec.decode(reader, reader.uint32()) + ); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.updateHostSpecs = (object.updateHostSpecs ?? 
[]).map((e: any) => + UpdateHostSpec.fromJSON(e) + ); + return message; + }, + + toJSON(message: UpdateClusterHostsRequest): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.updateHostSpecs) { + obj.updateHostSpecs = message.updateHostSpecs.map((e) => + e ? UpdateHostSpec.toJSON(e) : undefined + ); + } else { + obj.updateHostSpecs = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsRequest { + const message = { + ...baseUpdateClusterHostsRequest, + } as UpdateClusterHostsRequest; + message.clusterId = object.clusterId ?? ""; + message.updateHostSpecs = + object.updateHostSpecs?.map((e) => UpdateHostSpec.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsRequest.$type, + UpdateClusterHostsRequest +); + +const baseUpdateClusterHostsMetadata: object = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsMetadata", + clusterId: "", + hostNames: "", +}; + +export const UpdateClusterHostsMetadata = { + $type: "yandex.cloud.mdb.redis.v1.UpdateClusterHostsMetadata" as const, + + encode( + message: UpdateClusterHostsMetadata, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.clusterId !== "") { + writer.uint32(10).string(message.clusterId); + } + for (const v of message.hostNames) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): UpdateClusterHostsMetadata { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.hostNames = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.clusterId = reader.string(); + break; + case 2: + message.hostNames.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = + object.clusterId !== undefined && object.clusterId !== null + ? String(object.clusterId) + : ""; + message.hostNames = (object.hostNames ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: UpdateClusterHostsMetadata): unknown { + const obj: any = {}; + message.clusterId !== undefined && (obj.clusterId = message.clusterId); + if (message.hostNames) { + obj.hostNames = message.hostNames.map((e) => e); + } else { + obj.hostNames = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateClusterHostsMetadata { + const message = { + ...baseUpdateClusterHostsMetadata, + } as UpdateClusterHostsMetadata; + message.clusterId = object.clusterId ?? 
""; + message.hostNames = object.hostNames?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + UpdateClusterHostsMetadata.$type, + UpdateClusterHostsMetadata +); + const baseBackupClusterRequest: object = { $type: "yandex.cloud.mdb.redis.v1.BackupClusterRequest", clusterId: "", @@ -2560,6 +2837,7 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + persistenceMode: 0, }; export const RestoreClusterRequest = { @@ -2612,6 +2890,9 @@ export const RestoreClusterRequest = { writer.uint32(90).fork() ).ldelim(); } + if (message.persistenceMode !== 0) { + writer.uint32(96).int32(message.persistenceMode); + } return writer; }, @@ -2667,6 +2948,9 @@ export const RestoreClusterRequest = { case 11: message.tlsEnabled = BoolValue.decode(reader, reader.uint32()).value; break; + case 12: + message.persistenceMode = reader.int32() as any; + break; default: reader.skipType(tag & 7); break; @@ -2721,6 +3005,10 @@ export const RestoreClusterRequest = { object.tlsEnabled !== undefined && object.tlsEnabled !== null ? Boolean(object.tlsEnabled) : undefined; + message.persistenceMode = + object.persistenceMode !== undefined && object.persistenceMode !== null + ? cluster_PersistenceModeFromJSON(object.persistenceMode) + : 0; return message; }, @@ -2757,6 +3045,10 @@ export const RestoreClusterRequest = { obj.securityGroupIds = []; } message.tlsEnabled !== undefined && (obj.tlsEnabled = message.tlsEnabled); + message.persistenceMode !== undefined && + (obj.persistenceMode = cluster_PersistenceModeToJSON( + message.persistenceMode + )); return obj; }, @@ -2786,6 +3078,7 @@ export const RestoreClusterRequest = { message.folderId = object.folderId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.tlsEnabled = object.tlsEnabled ?? undefined; + message.persistenceMode = object.persistenceMode ?? 
0; return message; }, }; @@ -5701,11 +5994,128 @@ messageTypeRegistry.set( RebalanceClusterMetadata ); +const baseUpdateHostSpec: object = { + $type: "yandex.cloud.mdb.redis.v1.UpdateHostSpec", + hostName: "", + assignPublicIp: false, +}; + +export const UpdateHostSpec = { + $type: "yandex.cloud.mdb.redis.v1.UpdateHostSpec" as const, + + encode( + message: UpdateHostSpec, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hostName !== "") { + writer.uint32(10).string(message.hostName); + } + if (message.replicaPriority !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaPriority!, + }, + writer.uint32(18).fork() + ).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(24).bool(message.assignPublicIp); + } + if (message.updateMask !== undefined) { + FieldMask.encode(message.updateMask, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): UpdateHostSpec { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hostName = reader.string(); + break; + case 2: + message.replicaPriority = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 3: + message.assignPublicIp = reader.bool(); + break; + case 4: + message.updateMask = FieldMask.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = + object.hostName !== undefined && object.hostName !== null + ? 
String(object.hostName) + : ""; + message.replicaPriority = + object.replicaPriority !== undefined && object.replicaPriority !== null + ? Number(object.replicaPriority) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? Boolean(object.assignPublicIp) + : false; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? FieldMask.fromJSON(object.updateMask) + : undefined; + return message; + }, + + toJSON(message: UpdateHostSpec): unknown { + const obj: any = {}; + message.hostName !== undefined && (obj.hostName = message.hostName); + message.replicaPriority !== undefined && + (obj.replicaPriority = message.replicaPriority); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); + message.updateMask !== undefined && + (obj.updateMask = message.updateMask + ? FieldMask.toJSON(message.updateMask) + : undefined); + return obj; + }, + + fromPartial, I>>( + object: I + ): UpdateHostSpec { + const message = { ...baseUpdateHostSpec } as UpdateHostSpec; + message.hostName = object.hostName ?? ""; + message.replicaPriority = object.replicaPriority ?? undefined; + message.assignPublicIp = object.assignPublicIp ?? false; + message.updateMask = + object.updateMask !== undefined && object.updateMask !== null + ? 
FieldMask.fromPartial(object.updateMask) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(UpdateHostSpec.$type, UpdateHostSpec); + const baseHostSpec: object = { $type: "yandex.cloud.mdb.redis.v1.HostSpec", zoneId: "", subnetId: "", shardName: "", + assignPublicIp: false, }; export const HostSpec = { @@ -5724,6 +6134,18 @@ export const HostSpec = { if (message.shardName !== "") { writer.uint32(26).string(message.shardName); } + if (message.replicaPriority !== undefined) { + Int64Value.encode( + { + $type: "google.protobuf.Int64Value", + value: message.replicaPriority!, + }, + writer.uint32(34).fork() + ).ldelim(); + } + if (message.assignPublicIp === true) { + writer.uint32(40).bool(message.assignPublicIp); + } return writer; }, @@ -5743,6 +6165,15 @@ export const HostSpec = { case 3: message.shardName = reader.string(); break; + case 4: + message.replicaPriority = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + case 5: + message.assignPublicIp = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -5765,6 +6196,14 @@ export const HostSpec = { object.shardName !== undefined && object.shardName !== null ? String(object.shardName) : ""; + message.replicaPriority = + object.replicaPriority !== undefined && object.replicaPriority !== null + ? Number(object.replicaPriority) + : undefined; + message.assignPublicIp = + object.assignPublicIp !== undefined && object.assignPublicIp !== null + ? 
Boolean(object.assignPublicIp) + : false; return message; }, @@ -5773,6 +6212,10 @@ export const HostSpec = { message.zoneId !== undefined && (obj.zoneId = message.zoneId); message.subnetId !== undefined && (obj.subnetId = message.subnetId); message.shardName !== undefined && (obj.shardName = message.shardName); + message.replicaPriority !== undefined && + (obj.replicaPriority = message.replicaPriority); + message.assignPublicIp !== undefined && + (obj.assignPublicIp = message.assignPublicIp); return obj; }, @@ -5781,6 +6224,8 @@ export const HostSpec = { message.zoneId = object.zoneId ?? ""; message.subnetId = object.subnetId ?? ""; message.shardName = object.shardName ?? ""; + message.replicaPriority = object.replicaPriority ?? undefined; + message.assignPublicIp = object.assignPublicIp ?? false; return message; }, }; @@ -6217,6 +6662,19 @@ export const ClusterServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, + /** Updates the specified hosts. */ + updateHosts: { + path: "/yandex.cloud.mdb.redis.v1.ClusterService/UpdateHosts", + requestStream: false, + responseStream: false, + requestSerialize: (value: UpdateClusterHostsRequest) => + Buffer.from(UpdateClusterHostsRequest.encode(value).finish()), + requestDeserialize: (value: Buffer) => + UpdateClusterHostsRequest.decode(value), + responseSerialize: (value: Operation) => + Buffer.from(Operation.encode(value).finish()), + responseDeserialize: (value: Buffer) => Operation.decode(value), + }, /** Returns the specified shard. */ getShard: { path: "/yandex.cloud.mdb.redis.v1.ClusterService/GetShard", @@ -6341,6 +6799,8 @@ export interface ClusterServiceServer extends UntypedServiceImplementation { addHosts: handleUnaryCall; /** Deletes the specified hosts for a cluster. */ deleteHosts: handleUnaryCall; + /** Updates the specified hosts. */ + updateHosts: handleUnaryCall; /** Returns the specified shard. 
*/ getShard: handleUnaryCall; /** Retrieves a list of shards. */ @@ -6707,6 +7167,22 @@ export interface ClusterServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; + /** Updates the specified hosts. */ + updateHosts( + request: UpdateClusterHostsRequest, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; + updateHosts( + request: UpdateClusterHostsRequest, + metadata: Metadata, + options: Partial, + callback: (error: ServiceError | null, response: Operation) => void + ): ClientUnaryCall; /** Returns the specified shard. */ getShard( request: GetClusterShardRequest, diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts index e6f6dda6..f95c9aa0 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis5_0.ts @@ -35,6 +35,10 @@ export interface Redisconfig50 { slowlogMaxLen?: number; /** String setting for pub\sub functionality; subset of KEg$lshzxeAt. */ notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. */ + clientOutputBufferLimitPubsub?: Redisconfig50_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig50_ClientOutputBufferLimit; } export enum Redisconfig50_MaxmemoryPolicy { @@ -129,6 +133,16 @@ export function redisconfig50_MaxmemoryPolicyToJSON( } } +export interface Redisconfig50_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig5_0.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. 
*/ + softLimit?: number; + /** Seconds for soft limit. */ + softSeconds?: number; +} + export interface Redisconfigset50 { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet5_0"; /** @@ -192,6 +206,18 @@ export const Redisconfig50 = { if (message.notifyKeyspaceEvents !== "") { writer.uint32(58).string(message.notifyKeyspaceEvents); } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig50_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig50_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -229,6 +255,20 @@ export const Redisconfig50 = { case 7: message.notifyKeyspaceEvents = reader.string(); break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig50_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig50_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -269,6 +309,20 @@ export const Redisconfig50 = { object.notifyKeyspaceEvents !== null ? String(object.notifyKeyspaceEvents) : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig50_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig50_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, @@ -287,6 +341,18 @@ export const Redisconfig50 = { (obj.slowlogMaxLen = message.slowlogMaxLen); message.notifyKeyspaceEvents !== undefined && (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig50_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig50_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); return obj; }, @@ -301,12 +367,138 @@ export const Redisconfig50 = { message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig50_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig50_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, }; messageTypeRegistry.set(Redisconfig50.$type, Redisconfig50); +const baseRedisconfig50_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig5_0.ClientOutputBufferLimit", +}; + +export const Redisconfig50_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig5_0.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig50_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig50_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRedisconfig50_ClientOutputBufferLimit, + } as Redisconfig50_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig50_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig50_ClientOutputBufferLimit, + } as Redisconfig50_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig50_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig50_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig50_ClientOutputBufferLimit, + } as Redisconfig50_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig50_ClientOutputBufferLimit.$type, + Redisconfig50_ClientOutputBufferLimit +); + const baseRedisconfigset50: object = { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet5_0", }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts index e397847f..fbb5b756 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_0.ts @@ -35,6 +35,10 @@ export interface Redisconfig60 { slowlogMaxLen?: number; /** String setting for pub\sub functionality; subset of KEg$lshzxeAtm. */ notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. */ + clientOutputBufferLimitPubsub?: Redisconfig60_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig60_ClientOutputBufferLimit; } export enum Redisconfig60_MaxmemoryPolicy { @@ -129,6 +133,16 @@ export function redisconfig60_MaxmemoryPolicyToJSON( } } +export interface Redisconfig60_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig6_0.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. 
*/ + softSeconds?: number; +} + export interface Redisconfigset60 { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_0"; /** @@ -192,6 +206,18 @@ export const Redisconfig60 = { if (message.notifyKeyspaceEvents !== "") { writer.uint32(58).string(message.notifyKeyspaceEvents); } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig60_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig60_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -229,6 +255,20 @@ export const Redisconfig60 = { case 7: message.notifyKeyspaceEvents = reader.string(); break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig60_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig60_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -269,6 +309,20 @@ export const Redisconfig60 = { object.notifyKeyspaceEvents !== null ? String(object.notifyKeyspaceEvents) : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig60_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig60_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, @@ -287,6 +341,18 @@ export const Redisconfig60 = { (obj.slowlogMaxLen = message.slowlogMaxLen); message.notifyKeyspaceEvents !== undefined && (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig60_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig60_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); return obj; }, @@ -301,12 +367,138 @@ export const Redisconfig60 = { message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig60_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig60_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, }; messageTypeRegistry.set(Redisconfig60.$type, Redisconfig60); +const baseRedisconfig60_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_0.ClientOutputBufferLimit", +}; + +export const Redisconfig60_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_0.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig60_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig60_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRedisconfig60_ClientOutputBufferLimit, + } as Redisconfig60_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig60_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig60_ClientOutputBufferLimit, + } as Redisconfig60_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig60_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig60_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig60_ClientOutputBufferLimit, + } as Redisconfig60_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig60_ClientOutputBufferLimit.$type, + Redisconfig60_ClientOutputBufferLimit +); + const baseRedisconfigset60: object = { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_0", }; diff --git a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts index 53648a05..3cbf65e9 100644 --- a/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts +++ b/src/generated/yandex/cloud/mdb/redis/v1/config/redis6_2.ts @@ -35,6 +35,10 @@ export interface Redisconfig62 { slowlogMaxLen?: number; /** String setting for pub\sub functionality; subset of KEg$lshzxeAtm. */ notifyKeyspaceEvents: string; + /** Redis connection output buffers limits for pubsub operations. */ + clientOutputBufferLimitPubsub?: Redisconfig62_ClientOutputBufferLimit; + /** Redis connection output buffers limits for clients. */ + clientOutputBufferLimitNormal?: Redisconfig62_ClientOutputBufferLimit; } export enum Redisconfig62_MaxmemoryPolicy { @@ -129,6 +133,16 @@ export function redisconfig62_MaxmemoryPolicyToJSON( } } +export interface Redisconfig62_ClientOutputBufferLimit { + $type: "yandex.cloud.mdb.redis.v1.config.RedisConfig6_2.ClientOutputBufferLimit"; + /** Total limit in bytes. */ + hardLimit?: number; + /** Limit in bytes during certain time period. */ + softLimit?: number; + /** Seconds for soft limit. 
*/ + softSeconds?: number; +} + export interface Redisconfigset62 { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_2"; /** @@ -192,6 +206,18 @@ export const Redisconfig62 = { if (message.notifyKeyspaceEvents !== "") { writer.uint32(58).string(message.notifyKeyspaceEvents); } + if (message.clientOutputBufferLimitPubsub !== undefined) { + Redisconfig62_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitPubsub, + writer.uint32(66).fork() + ).ldelim(); + } + if (message.clientOutputBufferLimitNormal !== undefined) { + Redisconfig62_ClientOutputBufferLimit.encode( + message.clientOutputBufferLimitNormal, + writer.uint32(74).fork() + ).ldelim(); + } return writer; }, @@ -229,6 +255,20 @@ export const Redisconfig62 = { case 7: message.notifyKeyspaceEvents = reader.string(); break; + case 8: + message.clientOutputBufferLimitPubsub = + Redisconfig62_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; + case 9: + message.clientOutputBufferLimitNormal = + Redisconfig62_ClientOutputBufferLimit.decode( + reader, + reader.uint32() + ); + break; default: reader.skipType(tag & 7); break; @@ -269,6 +309,20 @@ export const Redisconfig62 = { object.notifyKeyspaceEvents !== null ? String(object.notifyKeyspaceEvents) : ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig62_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig62_ClientOutputBufferLimit.fromJSON( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, @@ -287,6 +341,18 @@ export const Redisconfig62 = { (obj.slowlogMaxLen = message.slowlogMaxLen); message.notifyKeyspaceEvents !== undefined && (obj.notifyKeyspaceEvents = message.notifyKeyspaceEvents); + message.clientOutputBufferLimitPubsub !== undefined && + (obj.clientOutputBufferLimitPubsub = message.clientOutputBufferLimitPubsub + ? Redisconfig62_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitPubsub + ) + : undefined); + message.clientOutputBufferLimitNormal !== undefined && + (obj.clientOutputBufferLimitNormal = message.clientOutputBufferLimitNormal + ? Redisconfig62_ClientOutputBufferLimit.toJSON( + message.clientOutputBufferLimitNormal + ) + : undefined); return obj; }, @@ -301,12 +367,138 @@ export const Redisconfig62 = { message.slowlogLogSlowerThan = object.slowlogLogSlowerThan ?? undefined; message.slowlogMaxLen = object.slowlogMaxLen ?? undefined; message.notifyKeyspaceEvents = object.notifyKeyspaceEvents ?? ""; + message.clientOutputBufferLimitPubsub = + object.clientOutputBufferLimitPubsub !== undefined && + object.clientOutputBufferLimitPubsub !== null + ? Redisconfig62_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitPubsub + ) + : undefined; + message.clientOutputBufferLimitNormal = + object.clientOutputBufferLimitNormal !== undefined && + object.clientOutputBufferLimitNormal !== null + ? 
Redisconfig62_ClientOutputBufferLimit.fromPartial( + object.clientOutputBufferLimitNormal + ) + : undefined; return message; }, }; messageTypeRegistry.set(Redisconfig62.$type, Redisconfig62); +const baseRedisconfig62_ClientOutputBufferLimit: object = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_2.ClientOutputBufferLimit", +}; + +export const Redisconfig62_ClientOutputBufferLimit = { + $type: + "yandex.cloud.mdb.redis.v1.config.RedisConfig6_2.ClientOutputBufferLimit" as const, + + encode( + message: Redisconfig62_ClientOutputBufferLimit, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.hardLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.hardLimit! }, + writer.uint32(10).fork() + ).ldelim(); + } + if (message.softLimit !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softLimit! }, + writer.uint32(26).fork() + ).ldelim(); + } + if (message.softSeconds !== undefined) { + Int64Value.encode( + { $type: "google.protobuf.Int64Value", value: message.softSeconds! }, + writer.uint32(42).fork() + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number + ): Redisconfig62_ClientOutputBufferLimit { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { + ...baseRedisconfig62_ClientOutputBufferLimit, + } as Redisconfig62_ClientOutputBufferLimit; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.hardLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 3: + message.softLimit = Int64Value.decode(reader, reader.uint32()).value; + break; + case 5: + message.softSeconds = Int64Value.decode( + reader, + reader.uint32() + ).value; + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Redisconfig62_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig62_ClientOutputBufferLimit, + } as Redisconfig62_ClientOutputBufferLimit; + message.hardLimit = + object.hardLimit !== undefined && object.hardLimit !== null + ? Number(object.hardLimit) + : undefined; + message.softLimit = + object.softLimit !== undefined && object.softLimit !== null + ? Number(object.softLimit) + : undefined; + message.softSeconds = + object.softSeconds !== undefined && object.softSeconds !== null + ? Number(object.softSeconds) + : undefined; + return message; + }, + + toJSON(message: Redisconfig62_ClientOutputBufferLimit): unknown { + const obj: any = {}; + message.hardLimit !== undefined && (obj.hardLimit = message.hardLimit); + message.softLimit !== undefined && (obj.softLimit = message.softLimit); + message.softSeconds !== undefined && + (obj.softSeconds = message.softSeconds); + return obj; + }, + + fromPartial< + I extends Exact, I> + >(object: I): Redisconfig62_ClientOutputBufferLimit { + const message = { + ...baseRedisconfig62_ClientOutputBufferLimit, + } as Redisconfig62_ClientOutputBufferLimit; + message.hardLimit = object.hardLimit ?? undefined; + message.softLimit = object.softLimit ?? undefined; + message.softSeconds = object.softSeconds ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set( + Redisconfig62_ClientOutputBufferLimit.$type, + Redisconfig62_ClientOutputBufferLimit +); + const baseRedisconfigset62: object = { $type: "yandex.cloud.mdb.redis.v1.config.RedisConfigSet6_2", }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts index 70a41b1c..e0bae4e4 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster.ts @@ -56,6 +56,8 @@ export interface Cluster { deletionProtection: boolean; /** SQL Server Collation */ sqlcollation: string; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export enum Cluster_Environment { @@ -266,7 +268,7 @@ export interface Host { * Name of the SQL Server host. The host name is assigned by Managed Service for SQL Server * at creation time, and cannot be changed. 1-63 characters long. * - * The name is unique across all existing database hosts in Yandex.Cloud, + * The name is unique across all existing database hosts in Yandex Cloud, * as it defines the FQDN of the host. */ name: string; @@ -486,6 +488,8 @@ export interface Access { $type: "yandex.cloud.mdb.sqlserver.v1.Access"; /** Allow access for DataLens */ dataLens: boolean; + /** Allow access for Web SQL. 
*/ + webSql: boolean; } const baseCluster: object = { @@ -501,6 +505,7 @@ const baseCluster: object = { securityGroupIds: "", deletionProtection: false, sqlcollation: "", + hostGroupIds: "", }; export const Cluster = { @@ -565,6 +570,9 @@ export const Cluster = { if (message.sqlcollation !== "") { writer.uint32(122).string(message.sqlcollation); } + for (const v of message.hostGroupIds) { + writer.uint32(130).string(v!); + } return writer; }, @@ -575,6 +583,7 @@ export const Cluster = { message.labels = {}; message.monitoring = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -628,6 +637,9 @@ export const Cluster = { case 15: message.sqlcollation = reader.string(); break; + case 16: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -697,6 +709,9 @@ export const Cluster = { object.sqlcollation !== undefined && object.sqlcollation !== null ? String(object.sqlcollation) : ""; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -742,6 +757,11 @@ export const Cluster = { (obj.deletionProtection = message.deletionProtection); message.sqlcollation !== undefined && (obj.sqlcollation = message.sqlcollation); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -773,6 +793,7 @@ export const Cluster = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; message.sqlcollation = object.sqlcollation ?? 
""; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; @@ -1457,6 +1478,7 @@ messageTypeRegistry.set(Resources.$type, Resources); const baseAccess: object = { $type: "yandex.cloud.mdb.sqlserver.v1.Access", dataLens: false, + webSql: false, }; export const Access = { @@ -1469,6 +1491,9 @@ export const Access = { if (message.dataLens === true) { writer.uint32(8).bool(message.dataLens); } + if (message.webSql === true) { + writer.uint32(16).bool(message.webSql); + } return writer; }, @@ -1482,6 +1507,9 @@ export const Access = { case 1: message.dataLens = reader.bool(); break; + case 2: + message.webSql = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -1496,18 +1524,24 @@ export const Access = { object.dataLens !== undefined && object.dataLens !== null ? Boolean(object.dataLens) : false; + message.webSql = + object.webSql !== undefined && object.webSql !== null + ? Boolean(object.webSql) + : false; return message; }, toJSON(message: Access): unknown { const obj: any = {}; message.dataLens !== undefined && (obj.dataLens = message.dataLens); + message.webSql !== undefined && (obj.webSql = message.webSql); return obj; }, fromPartial, I>>(object: I): Access { const message = { ...baseAccess } as Access; message.dataLens = object.dataLens ?? false; + message.webSql = object.webSql ?? false; return message; }, }; diff --git a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts index a5a8f2d2..ff912a4c 100644 --- a/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts +++ b/src/generated/yandex/cloud/mdb/sqlserver/v1/cluster_service.ts @@ -126,6 +126,8 @@ export interface CreateClusterRequest { deletionProtection: boolean; /** name of SQL Collation that cluster will be created with */ sqlcollation: string; + /** Host groups hosting VMs of the cluster. 
*/ + hostGroupIds: string[]; } export interface CreateClusterRequest_LabelsEntry { @@ -250,6 +252,10 @@ export interface RestoreClusterRequest { folderId: string; /** User security groups */ securityGroupIds: string[]; + /** Deletion Protection inhibits deletion of the cluster */ + deletionProtection: boolean; + /** Host groups hosting VMs of the cluster. */ + hostGroupIds: string[]; } export interface RestoreClusterRequest_LabelsEntry { @@ -858,6 +864,7 @@ const baseCreateClusterRequest: object = { securityGroupIds: "", deletionProtection: false, sqlcollation: "", + hostGroupIds: "", }; export const CreateClusterRequest = { @@ -914,6 +921,9 @@ export const CreateClusterRequest = { if (message.sqlcollation !== "") { writer.uint32(106).string(message.sqlcollation); } + for (const v of message.hostGroupIds) { + writer.uint32(114).string(v!); + } return writer; }, @@ -929,6 +939,7 @@ export const CreateClusterRequest = { message.userSpecs = []; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -979,6 +990,9 @@ export const CreateClusterRequest = { case 13: message.sqlcollation = reader.string(); break; + case 14: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -1040,6 +1054,9 @@ export const CreateClusterRequest = { object.sqlcollation !== undefined && object.sqlcollation !== null ? String(object.sqlcollation) : ""; + message.hostGroupIds = (object.hostGroupIds ?? 
[]).map((e: any) => + String(e) + ); return message; }, @@ -1092,6 +1109,11 @@ export const CreateClusterRequest = { (obj.deletionProtection = message.deletionProtection); message.sqlcollation !== undefined && (obj.sqlcollation = message.sqlcollation); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -1125,6 +1147,7 @@ export const CreateClusterRequest = { message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; message.deletionProtection = object.deletionProtection ?? false; message.sqlcollation = object.sqlcollation ?? ""; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; @@ -1903,6 +1926,8 @@ const baseRestoreClusterRequest: object = { networkId: "", folderId: "", securityGroupIds: "", + deletionProtection: false, + hostGroupIds: "", }; export const RestoreClusterRequest = { @@ -1956,6 +1981,12 @@ export const RestoreClusterRequest = { for (const v of message.securityGroupIds) { writer.uint32(98).string(v!); } + if (message.deletionProtection === true) { + writer.uint32(104).bool(message.deletionProtection); + } + for (const v of message.hostGroupIds) { + writer.uint32(114).string(v!); + } return writer; }, @@ -1969,6 +2000,7 @@ export const RestoreClusterRequest = { message.labels = {}; message.hostSpecs = []; message.securityGroupIds = []; + message.hostGroupIds = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2013,6 +2045,12 @@ export const RestoreClusterRequest = { case 12: message.securityGroupIds.push(reader.string()); break; + case 13: + message.deletionProtection = reader.bool(); + break; + case 14: + message.hostGroupIds.push(reader.string()); + break; default: reader.skipType(tag & 7); break; @@ -2067,6 +2105,14 @@ export const RestoreClusterRequest = { message.securityGroupIds = (object.securityGroupIds ?? 
[]).map((e: any) => String(e) ); + message.deletionProtection = + object.deletionProtection !== undefined && + object.deletionProtection !== null + ? Boolean(object.deletionProtection) + : false; + message.hostGroupIds = (object.hostGroupIds ?? []).map((e: any) => + String(e) + ); return message; }, @@ -2103,6 +2149,13 @@ export const RestoreClusterRequest = { } else { obj.securityGroupIds = []; } + message.deletionProtection !== undefined && + (obj.deletionProtection = message.deletionProtection); + if (message.hostGroupIds) { + obj.hostGroupIds = message.hostGroupIds.map((e) => e); + } else { + obj.hostGroupIds = []; + } return obj; }, @@ -2132,6 +2185,8 @@ export const RestoreClusterRequest = { message.networkId = object.networkId ?? ""; message.folderId = object.folderId ?? ""; message.securityGroupIds = object.securityGroupIds?.map((e) => e) || []; + message.deletionProtection = object.deletionProtection ?? false; + message.hostGroupIds = object.hostGroupIds?.map((e) => e) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/operation/operation_service.ts b/src/generated/yandex/cloud/operation/operation_service.ts index 8f858fe0..18ca3a71 100644 --- a/src/generated/yandex/cloud/operation/operation_service.ts +++ b/src/generated/yandex/cloud/operation/operation_service.ts @@ -173,7 +173,11 @@ export const OperationServiceService = { Buffer.from(Operation.encode(value).finish()), responseDeserialize: (value: Buffer) => Operation.decode(value), }, - /** Cancels the specified operation. */ + /** + * Cancels the specified operation. + * + * Note that currently Yandex Object Storage API does not support cancelling operations. + */ cancel: { path: "/yandex.cloud.operation.OperationService/Cancel", requestStream: false, @@ -190,7 +194,11 @@ export const OperationServiceService = { export interface OperationServiceServer extends UntypedServiceImplementation { /** Returns the specified Operation resource. 
*/ get: handleUnaryCall; - /** Cancels the specified operation. */ + /** + * Cancels the specified operation. + * + * Note that currently Yandex Object Storage API does not support cancelling operations. + */ cancel: handleUnaryCall; } @@ -211,7 +219,11 @@ export interface OperationServiceClient extends Client { options: Partial, callback: (error: ServiceError | null, response: Operation) => void ): ClientUnaryCall; - /** Cancels the specified operation. */ + /** + * Cancels the specified operation. + * + * Note that currently Yandex Object Storage API does not support cancelling operations. + */ cancel( request: CancelOperationRequest, callback: (error: ServiceError | null, response: Operation) => void diff --git a/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts b/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts index ca1faeac..b205afd8 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/user_account.ts @@ -5,7 +5,7 @@ import _m0 from "protobufjs/minimal"; export const protobufPackage = "yandex.cloud.organizationmanager.v1"; -/** Currently represents only [Yandex.Passport account](/docs/iam/concepts/#passport). */ +/** Currently represents only [Yandex account](/docs/iam/concepts/#passport). */ export interface UserAccount { $type: "yandex.cloud.organizationmanager.v1.UserAccount"; /** ID of the user account. */ @@ -18,13 +18,13 @@ export interface UserAccount { /** * A YandexPassportUserAccount resource. - * For more information, see [Yandex.Passport account](/docs/iam/concepts/#passport). + * For more information, see [Yandex account](/docs/iam/concepts/#passport). */ export interface YandexPassportUserAccount { $type: "yandex.cloud.organizationmanager.v1.YandexPassportUserAccount"; - /** Login of the Yandex.Passport user account. */ + /** Login of the Yandex user account. */ login: string; - /** Default email of the Yandex.Passport user account. 
*/ + /** Default email of the Yandex user account. */ defaultEmail: string; } diff --git a/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts b/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts index 3a8d2016..0bd8bd79 100644 --- a/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts +++ b/src/generated/yandex/cloud/organizationmanager/v1/user_service.ts @@ -53,7 +53,7 @@ export interface ListMembersResponse { export interface ListMembersResponse_OrganizationUser { $type: "yandex.cloud.organizationmanager.v1.ListMembersResponse.OrganizationUser"; - /** OpenID standard claims with additional Yandex.Organization claims. */ + /** OpenID standard claims with additional Yandex Cloud Organization claims. */ subjectClaims?: SubjectClaims; } diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts index 68ab2d15..ad0d2381 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway.ts @@ -28,6 +28,8 @@ export interface ApiGateway { logGroupId: string; /** List of domains attached to API gateway. */ attachedDomains: AttachedDomain[]; + /** Network access. If specified the gateway will be attached to specified network/subnet(s). */ + connectivity?: Connectivity; } export enum ApiGateway_Status { @@ -109,6 +111,21 @@ export interface AttachedDomain { domain: string; } +/** Gateway connectivity specification. */ +export interface Connectivity { + $type: "yandex.cloud.serverless.apigateway.v1.Connectivity"; + /** + * Network the gateway will have access to. + * It's essential to specify network with subnets in all availability zones. + */ + networkId: string; + /** + * Complete list of subnets (from the same network) the gateway can be attached to. + * It's essential to specify at least one subnet for each availability zones. 
+ */ + subnetId: string[]; +} + const baseApiGateway: object = { $type: "yandex.cloud.serverless.apigateway.v1.ApiGateway", id: "", @@ -167,6 +184,12 @@ export const ApiGateway = { for (const v of message.attachedDomains) { AttachedDomain.encode(v!, writer.uint32(90).fork()).ldelim(); } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -216,6 +239,9 @@ export const ApiGateway = { AttachedDomain.decode(reader, reader.uint32()) ); break; + case 12: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -265,6 +291,10 @@ export const ApiGateway = { message.attachedDomains = (object.attachedDomains ?? []).map((e: any) => AttachedDomain.fromJSON(e) ); + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -294,6 +324,10 @@ export const ApiGateway = { } else { obj.attachedDomains = []; } + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -319,6 +353,10 @@ export const ApiGateway = { message.logGroupId = object.logGroupId ?? ""; message.attachedDomains = object.attachedDomains?.map((e) => AttachedDomain.fromPartial(e)) || []; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; @@ -504,6 +542,83 @@ export const AttachedDomain = { messageTypeRegistry.set(AttachedDomain.$type, AttachedDomain); +const baseConnectivity: object = { + $type: "yandex.cloud.serverless.apigateway.v1.Connectivity", + networkId: "", + subnetId: "", +}; + +export const Connectivity = { + $type: "yandex.cloud.serverless.apigateway.v1.Connectivity" as const, + + encode( + message: Connectivity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.networkId !== "") { + writer.uint32(10).string(message.networkId); + } + for (const v of message.subnetId) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Connectivity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseConnectivity } as Connectivity; + message.subnetId = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.networkId = reader.string(); + break; + case 2: + message.subnetId.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.subnetId = (object.subnetId ?? 
[]).map((e: any) => String(e)); + return message; + }, + + toJSON(message: Connectivity): unknown { + const obj: any = {}; + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.subnetId) { + obj.subnetId = message.subnetId.map((e) => e); + } else { + obj.subnetId = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = object.networkId ?? ""; + message.subnetId = object.subnetId?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(Connectivity.$type, Connectivity); + type Builtin = | Date | Function diff --git a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts index 03f29fd1..8692333c 100644 --- a/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts +++ b/src/generated/yandex/cloud/serverless/apigateway/v1/apigateway_service.ts @@ -14,8 +14,11 @@ import { ServiceError, } from "@grpc/grpc-js"; import _m0 from "protobufjs/minimal"; +import { + Connectivity, + ApiGateway, +} from "../../../../../yandex/cloud/serverless/apigateway/v1/apigateway"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; -import { ApiGateway } from "../../../../../yandex/cloud/serverless/apigateway/v1/apigateway"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; import { ListAccessBindingsRequest, @@ -102,6 +105,8 @@ export interface CreateApiGatewayRequest { labels: { [key: string]: string }; /** The text of specification, JSON or YAML. */ openapiSpec: string | undefined; + /** Gateway connectivity. If specified the gateway will be attached to specified network/subnet(s). 
*/ + connectivity?: Connectivity; } export interface CreateApiGatewayRequest_LabelsEntry { @@ -136,6 +141,8 @@ export interface UpdateApiGatewayRequest { labels: { [key: string]: string }; /** The text of specification, JSON or YAML. */ openapiSpec: string | undefined; + /** Gateway connectivity. If specified the gateway will be attached to specified network/subnet(s). */ + connectivity?: Connectivity; } export interface UpdateApiGatewayRequest_LabelsEntry { @@ -600,6 +607,12 @@ export const CreateApiGatewayRequest = { if (message.openapiSpec !== undefined) { writer.uint32(42).string(message.openapiSpec); } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(50).fork() + ).ldelim(); + } return writer; }, @@ -637,6 +650,9 @@ export const CreateApiGatewayRequest = { case 5: message.openapiSpec = reader.string(); break; + case 6: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -671,6 +687,10 @@ export const CreateApiGatewayRequest = { object.openapiSpec !== undefined && object.openapiSpec !== null ? String(object.openapiSpec) : undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -688,6 +708,10 @@ export const CreateApiGatewayRequest = { } message.openapiSpec !== undefined && (obj.openapiSpec = message.openapiSpec); + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -709,6 +733,10 @@ export const CreateApiGatewayRequest = { return acc; }, {}); message.openapiSpec = object.openapiSpec ?? undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; @@ -843,6 +871,12 @@ export const UpdateApiGatewayRequest = { if (message.openapiSpec !== undefined) { writer.uint32(50).string(message.openapiSpec); } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(58).fork() + ).ldelim(); + } return writer; }, @@ -883,6 +917,9 @@ export const UpdateApiGatewayRequest = { case 6: message.openapiSpec = reader.string(); break; + case 7: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -921,6 +958,10 @@ export const UpdateApiGatewayRequest = { object.openapiSpec !== undefined && object.openapiSpec !== null ? String(object.openapiSpec) : undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -943,6 +984,10 @@ export const UpdateApiGatewayRequest = { } message.openapiSpec !== undefined && (obj.openapiSpec = message.openapiSpec); + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -968,6 +1013,10 @@ export const UpdateApiGatewayRequest = { return acc; }, {}); message.openapiSpec = object.openapiSpec ?? undefined; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container.ts b/src/generated/yandex/cloud/serverless/containers/v1/container.ts index 34b754db..b1c7d6d6 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container.ts @@ -87,6 +87,8 @@ export interface Revision { concurrency: number; serviceAccountId: string; status: Revision_Status; + secrets: Secret[]; + connectivity?: Connectivity; } export enum Revision_Status { @@ -166,6 +168,20 @@ export interface Resources { coreFraction: number; } +export interface Secret { + $type: "yandex.cloud.serverless.containers.v1.Secret"; + id: string; + versionId: string; + key: string; + environmentVariable: string | undefined; +} + +export interface Connectivity { + $type: "yandex.cloud.serverless.containers.v1.Connectivity"; + networkId: string; + subnetIds: string[]; +} + const baseContainer: object = { $type: "yandex.cloud.serverless.containers.v1.Container", id: "", @@ -475,6 +491,15 @@ export const Revision = { if (message.status !== 0) { writer.uint32(80).int32(message.status); } + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(90).fork()).ldelim(); + } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(98).fork() + ).ldelim(); + } return writer; }, @@ -482,6 +507,7 @@ export const Revision = { const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = { ...baseRevision } as Revision; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -517,6 +543,12 @@ export const Revision = { case 10: message.status = reader.int32() as any; break; + case 11: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; + case 12: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -565,6 +597,13 @@ export const Revision = { object.status !== undefined && object.status !== null ? revision_StatusFromJSON(object.status) : 0; + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -593,6 +632,17 @@ export const Revision = { (obj.serviceAccountId = message.serviceAccountId); message.status !== undefined && (obj.status = revision_StatusToJSON(message.status)); + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -617,6 +667,11 @@ export const Revision = { message.concurrency = object.concurrency ?? 0; message.serviceAccountId = object.serviceAccountId ?? ""; message.status = object.status ?? 0; + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; @@ -1065,6 +1120,179 @@ export const Resources = { messageTypeRegistry.set(Resources.$type, Resources); +const baseSecret: object = { + $type: "yandex.cloud.serverless.containers.v1.Secret", + id: "", + versionId: "", + key: "", +}; + +export const Secret = { + $type: "yandex.cloud.serverless.containers.v1.Secret" as const, + + encode( + message: Secret, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.versionId !== "") { + writer.uint32(18).string(message.versionId); + } + if (message.key !== "") { + writer.uint32(26).string(message.key); + } + if (message.environmentVariable !== undefined) { + writer.uint32(34).string(message.environmentVariable); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Secret { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = { ...baseSecret } as Secret; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.versionId = reader.string(); + break; + case 3: + message.key = reader.string(); + break; + case 4: + message.environmentVariable = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Secret { + const message = { ...baseSecret } as Secret; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.versionId = + object.versionId !== undefined && object.versionId !== null + ? String(object.versionId) + : ""; + message.key = + object.key !== undefined && object.key !== null ? 
String(object.key) : ""; + message.environmentVariable = + object.environmentVariable !== undefined && + object.environmentVariable !== null + ? String(object.environmentVariable) + : undefined; + return message; + }, + + toJSON(message: Secret): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.versionId !== undefined && (obj.versionId = message.versionId); + message.key !== undefined && (obj.key = message.key); + message.environmentVariable !== undefined && + (obj.environmentVariable = message.environmentVariable); + return obj; + }, + + fromPartial, I>>(object: I): Secret { + const message = { ...baseSecret } as Secret; + message.id = object.id ?? ""; + message.versionId = object.versionId ?? ""; + message.key = object.key ?? ""; + message.environmentVariable = object.environmentVariable ?? undefined; + return message; + }, +}; + +messageTypeRegistry.set(Secret.$type, Secret); + +const baseConnectivity: object = { + $type: "yandex.cloud.serverless.containers.v1.Connectivity", + networkId: "", + subnetIds: "", +}; + +export const Connectivity = { + $type: "yandex.cloud.serverless.containers.v1.Connectivity" as const, + + encode( + message: Connectivity, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.networkId !== "") { + writer.uint32(10).string(message.networkId); + } + for (const v of message.subnetIds) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Connectivity { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseConnectivity } as Connectivity; + message.subnetIds = []; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.networkId = reader.string(); + break; + case 2: + message.subnetIds.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = + object.networkId !== undefined && object.networkId !== null + ? String(object.networkId) + : ""; + message.subnetIds = (object.subnetIds ?? []).map((e: any) => String(e)); + return message; + }, + + toJSON(message: Connectivity): unknown { + const obj: any = {}; + message.networkId !== undefined && (obj.networkId = message.networkId); + if (message.subnetIds) { + obj.subnetIds = message.subnetIds.map((e) => e); + } else { + obj.subnetIds = []; + } + return obj; + }, + + fromPartial, I>>( + object: I + ): Connectivity { + const message = { ...baseConnectivity } as Connectivity; + message.networkId = object.networkId ?? 
""; + message.subnetIds = object.subnetIds?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(Connectivity.$type, Connectivity); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts index 183c1994..d4c7972c 100644 --- a/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts +++ b/src/generated/yandex/cloud/serverless/containers/v1/container_service.ts @@ -17,10 +17,12 @@ import _m0 from "protobufjs/minimal"; import { FieldMask } from "../../../../../google/protobuf/field_mask"; import { Resources, + Connectivity, Command, Args, Container, Revision, + Secret, } from "../../../../../yandex/cloud/serverless/containers/v1/container"; import { Duration } from "../../../../../google/protobuf/duration"; import { Operation } from "../../../../../yandex/cloud/operation/operation"; @@ -130,6 +132,8 @@ export interface DeployContainerRevisionRequest { serviceAccountId: string; imageSpec?: ImageSpec; concurrency: number; + secrets: Secret[]; + connectivity?: Connectivity; } export interface ImageSpec { @@ -1507,6 +1511,15 @@ export const DeployContainerRevisionRequest = { if (message.concurrency !== 0) { writer.uint32(72).int64(message.concurrency); } + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(82).fork()).ldelim(); + } + if (message.connectivity !== undefined) { + Connectivity.encode( + message.connectivity, + writer.uint32(90).fork() + ).ldelim(); + } return writer; }, @@ -1519,6 +1532,7 @@ export const DeployContainerRevisionRequest = { const message = { ...baseDeployContainerRevisionRequest, } as DeployContainerRevisionRequest; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -1543,6 +1557,12 @@ export const 
DeployContainerRevisionRequest = { case 9: message.concurrency = longToNumber(reader.int64() as Long); break; + case 10: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; + case 11: + message.connectivity = Connectivity.decode(reader, reader.uint32()); + break; default: reader.skipType(tag & 7); break; @@ -1583,6 +1603,13 @@ export const DeployContainerRevisionRequest = { object.concurrency !== undefined && object.concurrency !== null ? Number(object.concurrency) : 0; + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? Connectivity.fromJSON(object.connectivity) + : undefined; return message; }, @@ -1608,6 +1635,17 @@ export const DeployContainerRevisionRequest = { : undefined); message.concurrency !== undefined && (obj.concurrency = Math.round(message.concurrency)); + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } + message.connectivity !== undefined && + (obj.connectivity = message.connectivity + ? Connectivity.toJSON(message.connectivity) + : undefined); return obj; }, @@ -1633,6 +1671,11 @@ export const DeployContainerRevisionRequest = { ? ImageSpec.fromPartial(object.imageSpec) : undefined; message.concurrency = object.concurrency ?? 0; + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; + message.connectivity = + object.connectivity !== undefined && object.connectivity !== null + ? 
Connectivity.fromPartial(object.connectivity) + : undefined; return message; }, }; diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function.ts b/src/generated/yandex/cloud/serverless/functions/v1/function.ts index f511b127..3c0bf2b6 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function.ts @@ -137,6 +137,8 @@ export interface Version { connectivity?: Connectivity; /** Additional service accounts to be used by the version. */ namedServiceAccounts: { [key: string]: string }; + /** Lockbox secrets to be used by the version */ + secrets: Secret[]; } export enum Version_Status { @@ -251,6 +253,19 @@ export interface ScalingPolicy { zoneRequestsLimit: number; } +/** Secret for serverless function */ +export interface Secret { + $type: "yandex.cloud.serverless.functions.v1.Secret"; + /** ID of lockbox secret */ + id: string; + /** ID of secret version */ + versionId: string; + /** Key in secret's payload, which value to be delivered into function environment */ + key: string; + /** environment variable in which secret's value to be delivered */ + environmentVariable: string | undefined; +} + const baseFunction: object = { $type: "yandex.cloud.serverless.functions.v1.Function", id: "", @@ -615,6 +630,9 @@ export const Version = { writer.uint32(146).fork() ).ldelim(); }); + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(154).fork()).ldelim(); + } return writer; }, @@ -625,6 +643,7 @@ export const Version = { message.tags = []; message.environment = {}; message.namedServiceAccounts = {}; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -690,6 +709,9 @@ export const Version = { message.namedServiceAccounts[entry18.key] = entry18.value; } break; + case 19: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -763,6 +785,9 @@ export 
const Version = { acc[key] = String(value); return acc; }, {}); + message.secrets = (object.secrets ?? []).map((e: any) => + Secret.fromJSON(e) + ); return message; }, @@ -812,6 +837,13 @@ export const Version = { obj.namedServiceAccounts[k] = v; }); } + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } return obj; }, @@ -856,6 +888,7 @@ export const Version = { } return acc; }, {}); + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; return message; }, }; @@ -1418,6 +1451,102 @@ export const ScalingPolicy = { messageTypeRegistry.set(ScalingPolicy.$type, ScalingPolicy); +const baseSecret: object = { + $type: "yandex.cloud.serverless.functions.v1.Secret", + id: "", + versionId: "", + key: "", +}; + +export const Secret = { + $type: "yandex.cloud.serverless.functions.v1.Secret" as const, + + encode( + message: Secret, + writer: _m0.Writer = _m0.Writer.create() + ): _m0.Writer { + if (message.id !== "") { + writer.uint32(10).string(message.id); + } + if (message.versionId !== "") { + writer.uint32(18).string(message.versionId); + } + if (message.key !== "") { + writer.uint32(26).string(message.key); + } + if (message.environmentVariable !== undefined) { + writer.uint32(34).string(message.environmentVariable); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Secret { + const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = { ...baseSecret } as Secret; + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.id = reader.string(); + break; + case 2: + message.versionId = reader.string(); + break; + case 3: + message.key = reader.string(); + break; + case 4: + message.environmentVariable = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }, + + fromJSON(object: any): Secret { + const message = { ...baseSecret } as Secret; + message.id = + object.id !== undefined && object.id !== null ? String(object.id) : ""; + message.versionId = + object.versionId !== undefined && object.versionId !== null + ? String(object.versionId) + : ""; + message.key = + object.key !== undefined && object.key !== null ? String(object.key) : ""; + message.environmentVariable = + object.environmentVariable !== undefined && + object.environmentVariable !== null + ? String(object.environmentVariable) + : undefined; + return message; + }, + + toJSON(message: Secret): unknown { + const obj: any = {}; + message.id !== undefined && (obj.id = message.id); + message.versionId !== undefined && (obj.versionId = message.versionId); + message.key !== undefined && (obj.key = message.key); + message.environmentVariable !== undefined && + (obj.environmentVariable = message.environmentVariable); + return obj; + }, + + fromPartial, I>>(object: I): Secret { + const message = { ...baseSecret } as Secret; + message.id = object.id ?? ""; + message.versionId = object.versionId ?? ""; + message.key = object.key ?? ""; + message.environmentVariable = object.environmentVariable ?? 
undefined; + return message; + }, +}; + +messageTypeRegistry.set(Secret.$type, Secret); + declare var self: any | undefined; declare var window: any | undefined; declare var global: any | undefined; diff --git a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts index 025ceb3f..8d05039f 100644 --- a/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts +++ b/src/generated/yandex/cloud/serverless/functions/v1/function_service.ts @@ -21,6 +21,7 @@ import { Function, Version, Package, + Secret, ScalingPolicy, } from "../../../../../yandex/cloud/serverless/functions/v1/function"; import { Duration } from "../../../../../google/protobuf/duration"; @@ -347,6 +348,8 @@ export interface CreateFunctionVersionRequest { connectivity?: Connectivity; /** Additional service accounts to be used by the version. */ namedServiceAccounts: { [key: string]: string }; + /** Lockbox secrets to be used by the version */ + secrets: Secret[]; } export interface CreateFunctionVersionRequest_EnvironmentEntry { @@ -2324,6 +2327,9 @@ export const CreateFunctionVersionRequest = { writer.uint32(122).fork() ).ldelim(); }); + for (const v of message.secrets) { + Secret.encode(v!, writer.uint32(146).fork()).ldelim(); + } return writer; }, @@ -2339,6 +2345,7 @@ export const CreateFunctionVersionRequest = { message.environment = {}; message.tag = []; message.namedServiceAccounts = {}; + message.secrets = []; while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { @@ -2397,6 +2404,9 @@ export const CreateFunctionVersionRequest = { message.namedServiceAccounts[entry15.key] = entry15.value; } break; + case 18: + message.secrets.push(Secret.decode(reader, reader.uint32())); + break; default: reader.skipType(tag & 7); break; @@ -2466,6 +2476,9 @@ export const CreateFunctionVersionRequest = { acc[key] = String(value); return acc; }, {}); + message.secrets = (object.secrets ?? 
[]).map((e: any) => + Secret.fromJSON(e) + ); return message; }, @@ -2517,6 +2530,13 @@ export const CreateFunctionVersionRequest = { obj.namedServiceAccounts[k] = v; }); } + if (message.secrets) { + obj.secrets = message.secrets.map((e) => + e ? Secret.toJSON(e) : undefined + ); + } else { + obj.secrets = []; + } return obj; }, @@ -2566,6 +2586,7 @@ export const CreateFunctionVersionRequest = { } return acc; }, {}); + message.secrets = object.secrets?.map((e) => Secret.fromPartial(e)) || []; return message; }, }; diff --git a/src/generated/yandex/cloud/service_clients.ts b/src/generated/yandex/cloud/service_clients.ts index 07d480a0..255d6f2e 100644 --- a/src/generated/yandex/cloud/service_clients.ts +++ b/src/generated/yandex/cloud/service_clients.ts @@ -3,6 +3,7 @@ export const SttServiceClient = cloudApi.ai.stt_service.SttServiceClient; export const TranslationServiceClient = cloudApi.ai.translate_translation_service.TranslationServiceClient; export const SynthesizerClient = cloudApi.ai.tts_service.SynthesizerClient; export const VisionServiceClient = cloudApi.ai.vision_service.VisionServiceClient; +export const ImageClassifierServiceClient = cloudApi.ai.vision_image_classifier_service.ImageClassifierServiceClient; export const BackendGroupServiceClient = cloudApi.apploadbalancer.backend_group_service.BackendGroupServiceClient; export const HttpRouterServiceClient = cloudApi.apploadbalancer.http_router_service.HttpRouterServiceClient; export const LoadBalancerServiceClient = cloudApi.apploadbalancer.load_balancer_service.LoadBalancerServiceClient; @@ -17,6 +18,7 @@ export const CacheServiceClient = cloudApi.cdn.cache_service.CacheServiceClient; export const OriginGroupServiceClient = cloudApi.cdn.origin_group_service.OriginGroupServiceClient; export const OriginServiceClient = cloudApi.cdn.origin_service.OriginServiceClient; export const ProviderServiceClient = cloudApi.cdn.provider_service.ProviderServiceClient; +export const RawLogsServiceClient = 
cloudApi.cdn.raw_logs_service.RawLogsServiceClient; export const ResourceServiceClient = cloudApi.cdn.resource_service.ResourceServiceClient; export const CertificateContentServiceClient = cloudApi.certificatemanager.certificate_content_service.CertificateContentServiceClient; export const CertificateServiceClient = cloudApi.certificatemanager.certificate_service.CertificateServiceClient; @@ -86,10 +88,14 @@ export const ClickHouseResourcePresetServiceClient = cloudApi.mdb.clickhouse_res export const ClickHouseUserServiceClient = cloudApi.mdb.clickhouse_user_service.UserServiceClient; export const VersionsServiceClient = cloudApi.mdb.clickhouse_versions_service.VersionsServiceClient; export const AuthServiceClient = cloudApi.mdb.elasticsearch_auth_service.AuthServiceClient; +export const ElasticBackupServiceClient = cloudApi.mdb.elasticsearch_backup_service.BackupServiceClient; export const ElasticClusterServiceClient = cloudApi.mdb.elasticsearch_cluster_service.ClusterServiceClient; +export const ElasticExtensionServiceClient = cloudApi.mdb.elasticsearch_extension_service.ExtensionServiceClient; export const ElasticResourcePresetServiceClient = cloudApi.mdb.elasticsearch_resource_preset_service.ResourcePresetServiceClient; export const ElasticUserServiceClient = cloudApi.mdb.elasticsearch_user_service.UserServiceClient; +export const GreenplumBackupServiceClient = cloudApi.mdb.greenplum_backup_service.BackupServiceClient; export const GreenplumClusterServiceClient = cloudApi.mdb.greenplum_cluster_service.ClusterServiceClient; +export const GreenplumResourcePresetServiceClient = cloudApi.mdb.greenplum_resource_preset_service.ResourcePresetServiceClient; export const KafkaClusterServiceClient = cloudApi.mdb.kafka_cluster_service.ClusterServiceClient; export const ConnectorServiceClient = cloudApi.mdb.kafka_connector_service.ConnectorServiceClient; export const KafkaResourcePresetServiceClient = cloudApi.mdb.kafka_resource_preset_service.ResourcePresetServiceClient; 
@@ -139,4 +145,4 @@ export const YdbBackupServiceClient = cloudApi.ydb.backup_service.BackupServiceC export const YdbDatabaseServiceClient = cloudApi.ydb.database_service.DatabaseServiceClient; export const LocationServiceClient = cloudApi.ydb.location_service.LocationServiceClient; export const YdbResourcePresetServiceClient = cloudApi.ydb.resource_preset_service.ResourcePresetServiceClient; -export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; \ No newline at end of file +export const StorageTypeServiceClient = cloudApi.ydb.storage_type_service.StorageTypeServiceClient; diff --git a/src/service-endpoints.ts b/src/service-endpoints.ts index 726a8726..87164e5b 100644 --- a/src/service-endpoints.ts +++ b/src/service-endpoints.ts @@ -64,7 +64,11 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.mdb.elasticsearch.v1.ClusterService', 'yandex.cloud.mdb.elasticsearch.v1.ResourcePresetService', 'yandex.cloud.mdb.elasticsearch.v1.UserService', + 'yandex.cloud.mdb.elasticsearch.v1.BackupService', + 'yandex.cloud.mdb.elasticsearch.v1.ExtensionService', 'yandex.cloud.mdb.greenplum.v1.ClusterService', + 'yandex.cloud.mdb.greenplum.v1.BackupService', + 'yandex.cloud.mdb.greenplum.v1.ResourcePresetService', 'yandex.cloud.mdb.kafka.v1.ClusterService', 'yandex.cloud.mdb.kafka.v1.ConnectorService', 'yandex.cloud.mdb.kafka.v1.ResourcePresetService', @@ -224,7 +228,10 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ endpoint: 'translate.api.cloud.yandex.net:443', }, { - serviceIds: ['yandex.cloud.ai.vision.v1.VisionService'], + serviceIds: [ + 'yandex.cloud.ai.vision.v1.VisionService', + 'yandex.cloud.ai.vision.v2.ImageClassifierService', + ], endpoint: 'vision.api.cloud.yandex.net:443', }, { @@ -258,6 +265,7 @@ const SERVICE_ENDPOINTS_LIST: ServiceEndpointsList = [ 'yandex.cloud.cdn.v1.OriginService', 'yandex.cloud.cdn.v1.ProviderService', 'yandex.cloud.cdn.v1.ResourceService', + 
'yandex.cloud.cdn.v1.RawLogsService', ], endpoint: 'cdn.api.cloud.yandex.net:443', },