From 0c39b0bed3107b1026cead3bc669e07b7b088ef0 Mon Sep 17 00:00:00 2001 From: AWS SDK for Ruby Date: Tue, 17 May 2022 18:03:24 +0000 Subject: [PATCH] Updated API models and rebuilt service gems. --- apis/glue/2017-03-31/api-2.json | 1466 +- apis/glue/2017-03-31/docs-2.json | 1059 ++ apis/kms/2014-11-01/docs-2.json | 36 +- gems/aws-sdk-glue/CHANGELOG.md | 5 + gems/aws-sdk-glue/VERSION | 2 +- gems/aws-sdk-glue/lib/aws-sdk-glue.rb | 2 +- gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb | 2782 +++- .../lib/aws-sdk-glue/client_api.rb | 713 + gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb | 11955 ++++++++++++---- gems/aws-sdk-kms/CHANGELOG.md | 5 + gems/aws-sdk-kms/VERSION | 2 +- gems/aws-sdk-kms/lib/aws-sdk-kms.rb | 2 +- gems/aws-sdk-kms/lib/aws-sdk-kms/client.rb | 321 +- gems/aws-sdk-kms/lib/aws-sdk-kms/types.rb | 104 +- 14 files changed, 15690 insertions(+), 2764 deletions(-) diff --git a/apis/glue/2017-03-31/api-2.json b/apis/glue/2017-03-31/api-2.json index 29127f8a909..e2fbc0cbaa4 100644 --- a/apis/glue/2017-03-31/api-2.json +++ b/apis/glue/2017-03-31/api-2.json @@ -2883,11 +2883,68 @@ "type":"list", "member":{"shape":"Action"} }, + "AdditionalOptions":{ + "type":"map", + "key":{"shape":"EnclosedInStringProperty"}, + "value":{"shape":"EnclosedInStringProperty"} + }, "AdditionalPlanOptionsMap":{ "type":"map", "key":{"shape":"GenericString"}, "value":{"shape":"GenericString"} }, + "AggFunction":{ + "type":"string", + "enum":[ + "avg", + "countDistinct", + "count", + "first", + "last", + "kurtosis", + "max", + "min", + "skewness", + "stddev_samp", + "stddev_pop", + "sum", + "sumDistinct", + "var_samp", + "var_pop" + ] + }, + "Aggregate":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Groups", + "Aggs" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Groups":{"shape":"GlueStudioPathList"}, + "Aggs":{"shape":"AggregateOperations"} + } + }, + "AggregateOperation":{ + "type":"structure", + "required":[ + "Column", + "AggFunc" + ], + "members":{ + "Column":{"shape":"EnclosedInStringProperties"}, + "AggFunc":{"shape":"AggFunction"} + } + }, + "AggregateOperations":{ + "type":"list", + "member":{"shape":"AggregateOperation"}, + "max":30, + "min":1 + }, "AlreadyExistsException":{ "type":"structure", "members":{ @@ -2895,6 +2952,38 @@ }, "exception":true }, + "ApplyMapping":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Mapping" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Mapping":{"shape":"Mappings"} + } + }, + "AthenaConnectorSource":{ + "type":"structure", + "required":[ + "Name", + "ConnectionName", + "ConnectorName", + "ConnectionType", + "SchemaName" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "ConnectionTable":{"shape":"EnclosedInStringPropertyWithQuote"}, + "SchemaName":{"shape":"EnclosedInStringProperty"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, "AttemptCount":{"type":"integer"}, "AuditColumnNamesList":{ "type":"list", @@ -2938,6 +3027,21 @@ "type":"list", "member":{"shape":"BackfillError"} }, + "BasicCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, 
"BatchCreatePartitionRequest":{ "type":"structure", "required":[ @@ -3403,6 +3507,35 @@ "max":100, "min":0 }, + "BoxedBoolean":{ + "type":"boolean", + "box":true + }, + "BoxedDoubleFraction":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, + "BoxedLong":{ + "type":"long", + "box":true + }, + "BoxedNonNegativeInt":{ + "type":"integer", + "box":true, + "min":0 + }, + "BoxedNonNegativeLong":{ + "type":"long", + "box":true, + "min":0 + }, + "BoxedPositiveInt":{ + "type":"integer", + "box":true, + "min":0 + }, "CancelMLTaskRunRequest":{ "type":"structure", "required":[ @@ -3481,6 +3614,69 @@ "ImportedBy":{"shape":"NameString"} } }, + "CatalogKafkaSource":{ + "type":"structure", + "required":[ + "Name", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "WindowSize":{ + "shape":"BoxedPositiveInt", + "box":true + }, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "StreamingOptions":{"shape":"KafkaStreamingSourceOptions"}, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "CatalogKinesisSource":{ + "type":"structure", + "required":[ + "Name", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "WindowSize":{"shape":"BoxedPositiveInt"}, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "StreamingOptions":{"shape":"KinesisStreamingSourceOptions"}, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "CatalogSchemaChangePolicy":{ + "type":"structure", + "members":{ + "EnableUpdateCatalog":{"shape":"BoxedBoolean"}, + "UpdateBehavior":{"shape":"UpdateCatalogBehavior"} + } + }, + "CatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "CatalogTablesList":{ "type":"list", "member":{"shape":"NameString"}, @@ -3554,6 +3750,67 @@ }, "CodeGenArgName":{"type":"string"}, "CodeGenArgValue":{"type":"string"}, + "CodeGenConfigurationNode":{ + "type":"structure", + "members":{ + "AthenaConnectorSource":{"shape":"AthenaConnectorSource"}, + "JDBCConnectorSource":{"shape":"JDBCConnectorSource"}, + "SparkConnectorSource":{"shape":"SparkConnectorSource"}, + "CatalogSource":{"shape":"CatalogSource"}, + "RedshiftSource":{"shape":"RedshiftSource"}, + "S3CatalogSource":{"shape":"S3CatalogSource"}, + "S3CsvSource":{"shape":"S3CsvSource"}, + "S3JsonSource":{"shape":"S3JsonSource"}, + "S3ParquetSource":{"shape":"S3ParquetSource"}, + "RelationalCatalogSource":{"shape":"RelationalCatalogSource"}, + "DynamoDBCatalogSource":{"shape":"DynamoDBCatalogSource"}, + "JDBCConnectorTarget":{"shape":"JDBCConnectorTarget"}, + "SparkConnectorTarget":{"shape":"SparkConnectorTarget"}, + "CatalogTarget":{"shape":"BasicCatalogTarget"}, + "RedshiftTarget":{"shape":"RedshiftTarget"}, + "S3CatalogTarget":{"shape":"S3CatalogTarget"}, + "S3GlueParquetTarget":{"shape":"S3GlueParquetTarget"}, + "S3DirectTarget":{"shape":"S3DirectTarget"}, + "ApplyMapping":{"shape":"ApplyMapping"}, + "SelectFields":{"shape":"SelectFields"}, + "DropFields":{"shape":"DropFields"}, + "RenameField":{"shape":"RenameField"}, + "Spigot":{"shape":"Spigot"}, + "Join":{"shape":"Join"}, + "SplitFields":{"shape":"SplitFields"}, + 
"SelectFromCollection":{"shape":"SelectFromCollection"}, + "FillMissingValues":{"shape":"FillMissingValues"}, + "Filter":{"shape":"Filter"}, + "CustomCode":{"shape":"CustomCode"}, + "SparkSQL":{"shape":"SparkSQL"}, + "DirectKinesisSource":{"shape":"DirectKinesisSource"}, + "DirectKafkaSource":{"shape":"DirectKafkaSource"}, + "CatalogKinesisSource":{"shape":"CatalogKinesisSource"}, + "CatalogKafkaSource":{"shape":"CatalogKafkaSource"}, + "DropNullFields":{"shape":"DropNullFields"}, + "Merge":{"shape":"Merge"}, + "Union":{"shape":"Union"}, + "PIIDetection":{"shape":"PIIDetection"}, + "Aggregate":{"shape":"Aggregate"}, + "DropDuplicates":{"shape":"DropDuplicates"}, + "GovernedCatalogTarget":{"shape":"GovernedCatalogTarget"}, + "GovernedCatalogSource":{"shape":"GovernedCatalogSource"}, + "MicrosoftSQLServerCatalogSource":{"shape":"MicrosoftSQLServerCatalogSource"}, + "MySQLCatalogSource":{"shape":"MySQLCatalogSource"}, + "OracleSQLCatalogSource":{"shape":"OracleSQLCatalogSource"}, + "PostgreSQLCatalogSource":{"shape":"PostgreSQLCatalogSource"}, + "MicrosoftSQLServerCatalogTarget":{"shape":"MicrosoftSQLServerCatalogTarget"}, + "MySQLCatalogTarget":{"shape":"MySQLCatalogTarget"}, + "OracleSQLCatalogTarget":{"shape":"OracleSQLCatalogTarget"}, + "PostgreSQLCatalogTarget":{"shape":"PostgreSQLCatalogTarget"} + } + }, + "CodeGenConfigurationNodes":{ + "type":"map", + "key":{"shape":"NodeId"}, + "value":{"shape":"CodeGenConfigurationNode"}, + "sensitive":true + }, "CodeGenEdge":{ "type":"structure", "required":[ @@ -3756,6 +4013,13 @@ "FULL_ALL" ] }, + "CompressionType":{ + "type":"string", + "enum":[ + "gzip", + "bzip2" + ] + }, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -4265,7 +4529,8 @@ "NotificationProperty":{"shape":"NotificationProperty"}, "GlueVersion":{"shape":"GlueVersionString"}, "NumberOfWorkers":{"shape":"NullableInteger"}, - "WorkerType":{"shape":"WorkerType"} + "WorkerType":{"shape":"WorkerType"}, + "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"} } }, "CreateJobResponse":{ @@ -4603,6 +4868,22 @@ "min":1, "pattern":"[^\\r\\n]" }, + "CustomCode":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Code", + "ClassName" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"ManyInputs"}, + "Code":{"shape":"ExtendedString"}, + "ClassName":{"shape":"EnclosedInStringProperty"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, "CustomEntityType":{ "type":"structure", "required":[ @@ -4703,6 +4984,17 @@ "member":{"shape":"Database"} }, "DatabaseName":{"type":"string"}, + "Datatype":{ + "type":"structure", + "required":[ + "Id", + "Label" + ], + "members":{ + "Id":{"shape":"GenericLimitedString"}, + "Label":{"shape":"GenericLimitedString"} + } + }, "DateColumnStatisticsData":{ "type":"structure", "required":[ @@ -5186,6 +5478,49 @@ "max":25, "min":1 }, + "DirectKafkaSource":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"NodeName"}, + "StreamingOptions":{"shape":"KafkaStreamingSourceOptions"}, + "WindowSize":{ + "shape":"BoxedPositiveInt", + "box":true + }, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "DirectKinesisSource":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"NodeName"}, + "WindowSize":{ + "shape":"BoxedPositiveInt", + "box":true + }, + "DetectSchema":{ + "shape":"BoxedBoolean", + "box":true + }, + "StreamingOptions":{"shape":"KinesisStreamingSourceOptions"}, 
+ "DataPreviewOptions":{"shape":"StreamingDataPreviewOptions"} + } + }, + "DirectSchemaChangePolicy":{ + "type":"structure", + "members":{ + "EnableUpdateCatalog":{"shape":"BoxedBoolean"}, + "UpdateBehavior":{"shape":"UpdateCatalogBehavior"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"} + } + }, "Double":{"type":"double"}, "DoubleColumnStatisticsData":{ "type":"structure", @@ -5201,6 +5536,57 @@ } }, "DoubleValue":{"type":"double"}, + "DropDuplicates":{ + "type":"structure", + "required":[ + "Name", + "Inputs" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Columns":{"shape":"LimitedPathList"} + } + }, + "DropFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Paths":{"shape":"GlueStudioPathList"} + } + }, + "DropNullFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "NullCheckBoxList":{"shape":"NullCheckBoxList"}, + "NullTextList":{"shape":"NullValueFields"} + } + }, + "DynamoDBCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "DynamoDBTarget":{ "type":"structure", "members":{ @@ -5231,6 +5617,22 @@ "FALSE" ] }, + "EnclosedInStringProperties":{ + "type":"list", + "member":{"shape":"EnclosedInStringProperty"} + }, + "EnclosedInStringPropertiesMinOne":{ + "type":"list", + "member":{"shape":"EnclosedInStringProperty"} + }, + "EnclosedInStringProperty":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n\"'])*" + }, + "EnclosedInStringPropertyWithQuote":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\S\\r\\n])*" + }, "EncryptionAtRest":{ "type":"structure", "required":["CatalogEncryptionMode"], @@ -5314,13 +5716,103 @@ "OutputS3Path":{"shape":"UriString"} } }, + "ExtendedString":{ + "type":"string", + "pattern":"[\\s\\S]*" + }, "FieldType":{"type":"string"}, + "FillMissingValues":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "ImputedPath" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "ImputedPath":{"shape":"EnclosedInStringProperty"}, + "FilledPath":{"shape":"EnclosedInStringProperty"} + } + }, + "Filter":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "LogicalOperator", + "Filters" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "LogicalOperator":{"shape":"FilterLogicalOperator"}, + "Filters":{"shape":"FilterExpressions"} + } + }, + "FilterExpression":{ + "type":"structure", + "required":[ + "Operation", + "Values" + ], + "members":{ + "Operation":{"shape":"FilterOperation"}, + "Negated":{"shape":"BoxedBoolean"}, + "Values":{"shape":"FilterValues"} + } + }, + "FilterExpressions":{ + "type":"list", + "member":{"shape":"FilterExpression"} + }, + "FilterLogicalOperator":{ + "type":"string", + "enum":[ + "AND", + "OR" + ] + }, + "FilterOperation":{ + "type":"string", + "enum":[ + "EQ", + "LT", + "GT", + "LTE", + "GTE", + "REGEX", + "ISNULL" + ] + }, "FilterString":{ "type":"string", "max":2048, "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + 
"FilterValue":{ + "type":"structure", + "required":[ + "Type", + "Value" + ], + "members":{ + "Type":{"shape":"FilterValueType"}, + "Value":{"shape":"EnclosedInStringProperties"} + } + }, + "FilterValueType":{ + "type":"string", + "enum":[ + "COLUMNEXTRACTED", + "CONSTANT" + ] + }, + "FilterValues":{ + "type":"list", + "member":{"shape":"FilterValue"} + }, "FindMatchesMetrics":{ "type":"structure", "members":{ @@ -5365,6 +5857,10 @@ "max":1.0, "min":0.0 }, + "GenericLimitedString":{ + "type":"string", + "pattern":"[A-Za-z0-9_-]*" + }, "GenericMap":{ "type":"map", "key":{"shape":"GenericString"}, @@ -6476,12 +6972,59 @@ "UpdateTime":{"shape":"Timestamp"} } }, + "GlueRecordType":{ + "type":"string", + "enum":[ + "DATE", + "STRING", + "TIMESTAMP", + "INT", + "FLOAT", + "LONG", + "BIGDECIMAL", + "BYTE", + "SHORT", + "DOUBLE" + ] + }, "GlueResourceArn":{ "type":"string", "max":10240, "min":1, "pattern":"arn:(aws|aws-us-gov|aws-cn):glue:.*" }, + "GlueSchema":{ + "type":"structure", + "members":{ + "Columns":{"shape":"GlueStudioSchemaColumnList"} + } + }, + "GlueSchemas":{ + "type":"list", + "member":{"shape":"GlueSchema"} + }, + "GlueStudioColumnNameString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "GlueStudioPathList":{ + "type":"list", + "member":{"shape":"EnclosedInStringProperties"} + }, + "GlueStudioSchemaColumn":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{"shape":"GlueStudioColumnNameString"}, + "Type":{"shape":"ColumnTypeString"} + } + }, + "GlueStudioSchemaColumnList":{ + "type":"list", + "member":{"shape":"GlueStudioSchemaColumn"} + }, "GlueTable":{ "type":"structure", "required":[ @@ -6507,12 +7050,44 @@ "min":1, "pattern":"^\\w+\\.\\w+$" }, - "GrokClassifier":{ + "GovernedCatalogSource":{ "type":"structure", "required":[ "Name", - "Classification", - "GrokPattern" + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "PartitionPredicate":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"S3SourceAdditionalOptions"} + } + }, + "GovernedCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "SchemaChangePolicy":{"shape":"CatalogSchemaChangePolicy"} + } + }, + "GrokClassifier":{ + "type":"structure", + "required":[ + "Name", + "Classification", + "GrokPattern" ], "members":{ "Name":{"shape":"NameString"}, @@ -6618,6 +7193,108 @@ "exception":true }, "IsVersionValid":{"type":"boolean"}, + "JDBCConnectorOptions":{ + "type":"structure", + "members":{ + "FilterPredicate":{"shape":"EnclosedInStringProperty"}, + "PartitionColumn":{"shape":"EnclosedInStringProperty"}, + "LowerBound":{"shape":"BoxedNonNegativeLong"}, + "UpperBound":{"shape":"BoxedNonNegativeLong"}, + "NumPartitions":{"shape":"BoxedNonNegativeLong"}, + "JobBookmarkKeys":{"shape":"EnclosedInStringProperties"}, + "JobBookmarkKeysSortOrder":{"shape":"EnclosedInStringProperty"}, + "DataTypeMapping":{"shape":"JDBCDataTypeMapping"} + } + }, + "JDBCConnectorSource":{ + "type":"structure", + "required":[ + "Name", + "ConnectionName", + "ConnectorName", + "ConnectionType" + ], + "members":{ + 
"Name":{"shape":"NodeName"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"JDBCConnectorOptions"}, + "ConnectionTable":{"shape":"EnclosedInStringPropertyWithQuote"}, + "Query":{"shape":"SqlQuery"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "JDBCConnectorTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "ConnectionName", + "ConnectionTable", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectionTable":{"shape":"EnclosedInStringPropertyWithQuote"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "JDBCDataType":{ + "type":"string", + "enum":[ + "ARRAY", + "BIGINT", + "BINARY", + "BIT", + "BLOB", + "BOOLEAN", + "CHAR", + "CLOB", + "DATALINK", + "DATE", + "DECIMAL", + "DISTINCT", + "DOUBLE", + "FLOAT", + "INTEGER", + "JAVA_OBJECT", + "LONGNVARCHAR", + "LONGVARBINARY", + "LONGVARCHAR", + "NCHAR", + "NCLOB", + "NULL", + "NUMERIC", + "NVARCHAR", + "OTHER", + "REAL", + "REF", + "REF_CURSOR", + "ROWID", + "SMALLINT", + "SQLXML", + "STRUCT", + "TIME", + "TIME_WITH_TIMEZONE", + "TIMESTAMP", + "TIMESTAMP_WITH_TIMEZONE", + "TINYINT", + "VARBINARY", + "VARCHAR" + ] + }, + "JDBCDataTypeMapping":{ + "type":"map", + "key":{"shape":"JDBCDataType"}, + "value":{"shape":"GlueRecordType"} + }, "JdbcTarget":{ "type":"structure", "members":{ @@ -6656,7 +7333,8 @@ "NumberOfWorkers":{"shape":"NullableInteger"}, "SecurityConfiguration":{"shape":"NameString"}, "NotificationProperty":{"shape":"NotificationProperty"}, - "GlueVersion":{"shape":"GlueVersionString"} + "GlueVersion":{"shape":"GlueVersionString"}, + "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"} } }, "JobBookmarkEntry":{ @@ -6779,9 +7457,53 @@ "NumberOfWorkers":{"shape":"NullableInteger"}, "SecurityConfiguration":{"shape":"NameString"}, "NotificationProperty":{"shape":"NotificationProperty"}, - "GlueVersion":{"shape":"GlueVersionString"} + "GlueVersion":{"shape":"GlueVersionString"}, + "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"} } }, + "Join":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "JoinType", + "Columns" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"TwoInputs"}, + "JoinType":{"shape":"JoinType"}, + "Columns":{"shape":"JoinColumns"} + } + }, + "JoinColumn":{ + "type":"structure", + "required":[ + "From", + "Keys" + ], + "members":{ + "From":{"shape":"EnclosedInStringProperty"}, + "Keys":{"shape":"GlueStudioPathList"} + } + }, + "JoinColumns":{ + "type":"list", + "member":{"shape":"JoinColumn"}, + "max":2, + "min":2 + }, + "JoinType":{ + "type":"string", + "enum":[ + "equijoin", + "left", + "right", + "outer", + "leftsemi", + "leftanti" + ] + }, "JsonClassifier":{ "type":"structure", "required":[ @@ -6798,6 +7520,26 @@ }, "JsonPath":{"type":"string"}, "JsonValue":{"type":"string"}, + "KafkaStreamingSourceOptions":{ + "type":"structure", + "members":{ + "BootstrapServers":{"shape":"EnclosedInStringProperty"}, + "SecurityProtocol":{"shape":"EnclosedInStringProperty"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "TopicName":{"shape":"EnclosedInStringProperty"}, + 
"Assign":{"shape":"EnclosedInStringProperty"}, + "SubscribePattern":{"shape":"EnclosedInStringProperty"}, + "Classification":{"shape":"EnclosedInStringProperty"}, + "Delimiter":{"shape":"EnclosedInStringProperty"}, + "StartingOffsets":{"shape":"EnclosedInStringProperty"}, + "EndingOffsets":{"shape":"EnclosedInStringProperty"}, + "PollTimeoutMs":{"shape":"BoxedNonNegativeLong"}, + "NumRetries":{"shape":"BoxedNonNegativeInt"}, + "RetryIntervalMs":{"shape":"BoxedNonNegativeLong"}, + "MaxOffsetsPerTrigger":{"shape":"BoxedNonNegativeLong"}, + "MinPartitions":{"shape":"BoxedNonNegativeInt"} + } + }, "KeyList":{ "type":"list", "member":{"shape":"NameString"}, @@ -6825,6 +7567,29 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "KinesisStreamingSourceOptions":{ + "type":"structure", + "members":{ + "EndpointUrl":{"shape":"EnclosedInStringProperty"}, + "StreamName":{"shape":"EnclosedInStringProperty"}, + "Classification":{"shape":"EnclosedInStringProperty"}, + "Delimiter":{"shape":"EnclosedInStringProperty"}, + "StartingPosition":{"shape":"StartingPosition"}, + "MaxFetchTimeInMs":{"shape":"BoxedNonNegativeLong"}, + "MaxFetchRecordsPerShard":{"shape":"BoxedNonNegativeLong"}, + "MaxRecordPerRead":{"shape":"BoxedNonNegativeLong"}, + "AddIdleTimeBetweenReads":{"shape":"BoxedBoolean"}, + "IdleTimeBetweenReadsInMs":{"shape":"BoxedNonNegativeLong"}, + "DescribeShardInterval":{"shape":"BoxedNonNegativeLong"}, + "NumRetries":{"shape":"BoxedNonNegativeInt"}, + "RetryIntervalMs":{"shape":"BoxedNonNegativeLong"}, + "MaxRetryIntervalMs":{"shape":"BoxedNonNegativeLong"}, + "AvoidEmptyBatches":{"shape":"BoxedBoolean"}, + "StreamArn":{"shape":"EnclosedInStringProperty"}, + "RoleArn":{"shape":"EnclosedInStringProperty"}, + "RoleSessionName":{"shape":"EnclosedInStringProperty"} + } + }, "KmsKeyArn":{ "type":"string", "pattern":"arn:aws:kms:.*" @@ -6880,6 +7645,14 @@ ] }, "LatestSchemaVersionBoolean":{"type":"boolean"}, + "LimitedPathList":{ + "type":"list", + "member":{"shape":"LimitedStringList"} + }, + "LimitedStringList":{ + "type":"list", + "member":{"shape":"GenericLimitedString"} + }, "LineageConfiguration":{ "type":"structure", "members":{ @@ -7192,6 +7965,11 @@ "SSE-KMS" ] }, + "ManyInputs":{ + "type":"list", + "member":{"shape":"NodeId"}, + "min":1 + }, "MapValue":{ "type":"map", "key":{"shape":"GenericString"}, @@ -7199,6 +7977,17 @@ "max":100, "min":0 }, + "Mapping":{ + "type":"structure", + "members":{ + "ToKey":{"shape":"EnclosedInStringProperty"}, + "FromPath":{"shape":"EnclosedInStringProperties"}, + "FromType":{"shape":"EnclosedInStringProperty"}, + "ToType":{"shape":"EnclosedInStringProperty"}, + "Dropped":{"shape":"BoxedBoolean"}, + "Children":{"shape":"Mappings"} + } + }, "MappingEntry":{ "type":"structure", "members":{ @@ -7214,6 +8003,16 @@ "type":"list", "member":{"shape":"MappingEntry"} }, + "Mappings":{ + "type":"list", + "member":{"shape":"Mapping"} + }, + "MaskValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[*A-Za-z0-9_-]*" + }, "MatchCriteria":{ "type":"list", "member":{"shape":"NameString"}, @@ -7228,6 +8027,21 @@ "min":1 }, "MaxRetries":{"type":"integer"}, + "Merge":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Source", + "PrimaryKeys" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"TwoInputs"}, + "Source":{"shape":"NodeId"}, + "PrimaryKeys":{"shape":"GlueStudioPathList"} + } + }, "MessagePrefix":{ "type":"string", "max":255, @@ -7271,6 +8085,34 @@ "min":1, 
"pattern":"[a-zA-Z0-9+-=._./@]+" }, + "MicrosoftSQLServerCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "MicrosoftSQLServerCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "MillisecondsCount":{"type":"long"}, "MongoDBTarget":{ "type":"structure", @@ -7284,6 +8126,34 @@ "type":"list", "member":{"shape":"MongoDBTarget"} }, + "MySQLCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "MySQLCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "NameString":{ "type":"string", "max":255, @@ -7312,6 +8182,10 @@ "CrawlerDetails":{"shape":"CrawlerNodeDetails"} } }, + "NodeId":{ + "type":"string", + "pattern":"[A-Za-z0-9_-]*" + }, "NodeIdList":{ "type":"list", "member":{"shape":"NameString"} @@ -7320,6 +8194,10 @@ "type":"list", "member":{"shape":"Node"} }, + "NodeName":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF]|[^\\r\\n])*" + }, "NodeType":{ "type":"string", "enum":[ @@ -7332,6 +8210,10 @@ "type":"double", "min":0.0 }, + "NonNegativeInt":{ + "type":"integer", + "min":0 + }, "NonNegativeInteger":{ "type":"integer", "min":0 @@ -7351,6 +8233,31 @@ "box":true, "min":1 }, + "NullCheckBoxList":{ + "type":"structure", + "members":{ + "IsEmpty":{"shape":"BoxedBoolean"}, + "IsNullString":{"shape":"BoxedBoolean"}, + "IsNegOne":{"shape":"BoxedBoolean"} + } + }, + "NullValueField":{ + "type":"structure", + "required":[ + "Value", + "Datatype" + ], + "members":{ + "Value":{"shape":"EnclosedInStringProperty"}, + "Datatype":{"shape":"Datatype"} + } + }, + "NullValueFields":{ + "type":"list", + "member":{"shape":"NullValueField"}, + "max":50, + "min":0 + }, "NullableBoolean":{ "type":"boolean", "box":true @@ -7363,6 +8270,12 @@ "type":"integer", "box":true }, + "OneInput":{ + "type":"list", + "member":{"shape":"NodeId"}, + "max":1, + "min":1 + }, "OperationTimeoutException":{ "type":"structure", "members":{ @@ -7370,6 +8283,34 @@ }, "exception":true }, + "OracleSQLCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "OracleSQLCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "OrchestrationArgumentsMap":{ "type":"map", "key":{"shape":"OrchestrationNameString"}, @@ -7445,6 +8386,25 @@ "CreatedTime":{"shape":"CreatedTimestamp"} } }, + "PIIDetection":{ + "type":"structure", + 
"required":[ + "Name", + "Inputs", + "PiiType", + "EntityTypesToDetect" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PiiType":{"shape":"PiiType"}, + "EntityTypesToDetect":{"shape":"EnclosedInStringProperties"}, + "OutputColumnName":{"shape":"EnclosedInStringProperty"}, + "SampleFraction":{"shape":"BoxedDoubleFraction"}, + "ThresholdFraction":{"shape":"BoxedDoubleFraction"}, + "MaskValue":{"shape":"MaskValue"} + } + }, "PageSize":{ "type":"integer", "box":true, @@ -7461,6 +8421,16 @@ "type":"string", "max":512000 }, + "ParquetCompressionType":{ + "type":"string", + "enum":[ + "snappy", + "lzo", + "gzip", + "uncompressed", + "none" + ] + }, "Partition":{ "type":"structure", "members":{ @@ -7607,10 +8577,57 @@ "AvailabilityZone":{"shape":"NameString"} } }, + "PiiType":{ + "type":"string", + "enum":[ + "RowAudit", + "RowMasking", + "ColumnAudit", + "ColumnMasking" + ] + }, "PolicyJsonString":{ "type":"string", "min":2 }, + "PollingTime":{ + "type":"long", + "box":true, + "min":10 + }, + "PositiveLong":{ + "type":"long", + "box":true, + "min":1 + }, + "PostgreSQLCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, + "PostgreSQLCatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "Predecessor":{ "type":"structure", "members":{ @@ -7654,6 +8671,12 @@ "GROUP" ] }, + "Prob":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, "PropertyPredicate":{ "type":"structure", "members":{ @@ -7767,6 +8790,15 @@ "NextToken":{"shape":"SchemaRegistryTokenString"} } }, + "QuoteChar":{ + "type":"string", + "enum":[ + "quote", + "quillemet", + "single_quote", + "disabled" + ] + }, "RecordsCount":{ "type":"long", "box":true @@ -7785,6 +8817,39 @@ "RecrawlBehavior":{"shape":"RecrawlBehavior"} } }, + "RedshiftSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "RedshiftTmpDir":{"shape":"EnclosedInStringProperty"}, + "TmpDirIAMRole":{"shape":"EnclosedInStringProperty"} + } + }, + "RedshiftTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "RedshiftTmpDir":{"shape":"EnclosedInStringProperty"}, + "TmpDirIAMRole":{"shape":"EnclosedInStringProperty"}, + "UpsertRedshiftOptions":{"shape":"UpsertRedshiftTargetOptions"} + } + }, "RegisterSchemaVersionInput":{ "type":"structure", "required":[ @@ -7833,6 +8898,19 @@ "DELETING" ] }, + "RelationalCatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"} + } + }, "RemoveSchemaVersionMetadataInput":{ "type":"structure", "required":["MetadataKeyValue"], @@ -7856,6 +8934,21 @@ "MetadataValue":{"shape":"MetadataValueString"} } }, + 
"RenameField":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "SourcePath", + "TargetPath" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "SourcePath":{"shape":"EnclosedInStringProperties"}, + "TargetPath":{"shape":"EnclosedInStringProperties"} + } + }, "ReplaceBoolean":{"type":"boolean"}, "ResetJobBookmarkRequest":{ "type":"structure", @@ -7959,6 +9052,95 @@ "Id":{"shape":"IntegerValue"} } }, + "S3CatalogSource":{ + "type":"structure", + "required":[ + "Name", + "Database", + "Table" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "PartitionPredicate":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"S3SourceAdditionalOptions"} + } + }, + "S3CatalogTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Table", + "Database" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Table":{"shape":"EnclosedInStringProperty"}, + "Database":{"shape":"EnclosedInStringProperty"}, + "SchemaChangePolicy":{"shape":"CatalogSchemaChangePolicy"} + } + }, + "S3CsvSource":{ + "type":"structure", + "required":[ + "Name", + "Paths", + "Separator", + "QuoteChar" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Paths":{"shape":"EnclosedInStringProperties"}, + "CompressionType":{"shape":"CompressionType"}, + "Exclusions":{"shape":"EnclosedInStringProperties"}, + "GroupSize":{"shape":"EnclosedInStringProperty"}, + "GroupFiles":{"shape":"EnclosedInStringProperty"}, + "Recurse":{"shape":"BoxedBoolean"}, + "MaxBand":{"shape":"BoxedNonNegativeInt"}, + "MaxFilesInBand":{"shape":"BoxedNonNegativeInt"}, + "AdditionalOptions":{"shape":"S3DirectSourceAdditionalOptions"}, + "Separator":{"shape":"Separator"}, + "Escaper":{"shape":"EnclosedInStringPropertyWithQuote"}, + "QuoteChar":{"shape":"QuoteChar"}, + "Multiline":{"shape":"BoxedBoolean"}, + "WithHeader":{"shape":"BoxedBoolean"}, + "WriteHeader":{"shape":"BoxedBoolean"}, + "SkipFirst":{"shape":"BoxedBoolean"}, + "OptimizePerformance":{"shape":"BooleanValue"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "S3DirectSourceAdditionalOptions":{ + "type":"structure", + "members":{ + "BoundedSize":{"shape":"BoxedLong"}, + "BoundedFiles":{"shape":"BoxedLong"}, + "EnableSamplePath":{"shape":"BoxedBoolean"}, + "SamplePath":{"shape":"EnclosedInStringProperty"} + } + }, + "S3DirectTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path", + "Format" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Path":{"shape":"EnclosedInStringProperty"}, + "Compression":{"shape":"EnclosedInStringProperty"}, + "Format":{"shape":"TargetFormat"}, + "SchemaChangePolicy":{"shape":"DirectSchemaChangePolicy"} + } + }, "S3Encryption":{ "type":"structure", "members":{ @@ -7978,6 +9160,71 @@ "SSE-S3" ] }, + "S3GlueParquetTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "PartitionKeys":{"shape":"GlueStudioPathList"}, + "Path":{"shape":"EnclosedInStringProperty"}, + "Compression":{"shape":"ParquetCompressionType"}, + "SchemaChangePolicy":{"shape":"DirectSchemaChangePolicy"} + } + }, + "S3JsonSource":{ + "type":"structure", + "required":[ + "Name", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + 
"Paths":{"shape":"EnclosedInStringProperties"}, + "CompressionType":{"shape":"CompressionType"}, + "Exclusions":{"shape":"EnclosedInStringProperties"}, + "GroupSize":{"shape":"EnclosedInStringProperty"}, + "GroupFiles":{"shape":"EnclosedInStringProperty"}, + "Recurse":{"shape":"BoxedBoolean"}, + "MaxBand":{"shape":"BoxedNonNegativeInt"}, + "MaxFilesInBand":{"shape":"BoxedNonNegativeInt"}, + "AdditionalOptions":{"shape":"S3DirectSourceAdditionalOptions"}, + "JsonPath":{"shape":"EnclosedInStringProperty"}, + "Multiline":{"shape":"BoxedBoolean"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "S3ParquetSource":{ + "type":"structure", + "required":[ + "Name", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Paths":{"shape":"EnclosedInStringProperties"}, + "CompressionType":{"shape":"ParquetCompressionType"}, + "Exclusions":{"shape":"EnclosedInStringProperties"}, + "GroupSize":{"shape":"EnclosedInStringProperty"}, + "GroupFiles":{"shape":"EnclosedInStringProperty"}, + "Recurse":{"shape":"BoxedBoolean"}, + "MaxBand":{"shape":"BoxedNonNegativeInt"}, + "MaxFilesInBand":{"shape":"BoxedNonNegativeInt"}, + "AdditionalOptions":{"shape":"S3DirectSourceAdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "S3SourceAdditionalOptions":{ + "type":"structure", + "members":{ + "BoundedSize":{"shape":"BoxedLong"}, + "BoundedFiles":{"shape":"BoxedLong"} + } + }, "S3Target":{ "type":"structure", "members":{ @@ -8224,6 +9471,42 @@ "TotalSegments":{"shape":"TotalSegmentsInteger"} } }, + "SelectFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Paths":{"shape":"GlueStudioPathList"} + } + }, + "SelectFromCollection":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Index" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Index":{"shape":"NonNegativeInt"} + } + }, + "Separator":{ + "type":"string", + "enum":[ + "comma", + "ctrla", + "pipe", + "semicolon", + "tab" + ] + }, "SerDeInfo":{ "type":"structure", "members":{ @@ -8311,6 +9594,105 @@ "ASCENDING" ] }, + "SparkConnectorSource":{ + "type":"structure", + "required":[ + "Name", + "ConnectionName", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "SparkConnectorTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "ConnectionName", + "ConnectorName", + "ConnectionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "ConnectorName":{"shape":"EnclosedInStringProperty"}, + "ConnectionType":{"shape":"EnclosedInStringProperty"}, + "AdditionalOptions":{"shape":"AdditionalOptions"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "SparkSQL":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "SqlQuery", + "SqlAliases" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"ManyInputs"}, + "SqlQuery":{"shape":"SqlQuery"}, + "SqlAliases":{"shape":"SqlAliases"}, + "OutputSchemas":{"shape":"GlueSchemas"} + } + }, + "Spigot":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path" + ], + "members":{ + 
"Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Path":{"shape":"EnclosedInStringProperty"}, + "Topk":{"shape":"Topk"}, + "Prob":{"shape":"Prob"} + } + }, + "SplitFields":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Paths" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"OneInput"}, + "Paths":{"shape":"GlueStudioPathList"} + } + }, + "SqlAlias":{ + "type":"structure", + "required":[ + "From", + "Alias" + ], + "members":{ + "From":{"shape":"NodeId"}, + "Alias":{"shape":"EnclosedInStringPropertyWithQuote"} + } + }, + "SqlAliases":{ + "type":"list", + "member":{"shape":"SqlAlias"} + }, + "SqlQuery":{ + "type":"string", + "pattern":"([\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\s])*" + }, "StartBlueprintRunRequest":{ "type":"structure", "required":[ @@ -8478,6 +9860,14 @@ "BatchWindow":{"shape":"NullableInteger"} } }, + "StartingPosition":{ + "type":"string", + "enum":[ + "latest", + "trim_horizon", + "earliest" + ] + }, "Statement":{ "type":"structure", "members":{ @@ -8608,6 +9998,13 @@ "SchemaReference":{"shape":"SchemaReference"} } }, + "StreamingDataPreviewOptions":{ + "type":"structure", + "members":{ + "PollingTime":{"shape":"PollingTime"}, + "RecordPollingLimit":{"shape":"PositiveLong"} + } + }, "StringColumnStatisticsData":{ "type":"structure", "required":[ @@ -8763,6 +10160,16 @@ "max":50, "min":0 }, + "TargetFormat":{ + "type":"string", + "enum":[ + "json", + "csv", + "avro", + "orc", + "parquet" + ] + }, "TaskRun":{ "type":"structure", "members":{ @@ -8850,6 +10257,12 @@ "Timestamp":{"type":"timestamp"}, "TimestampValue":{"type":"timestamp"}, "Token":{"type":"string"}, + "Topk":{ + "type":"integer", + "box":true, + "max":100, + "min":0 + }, "TotalSegmentsInteger":{ "type":"integer", "max":10, @@ -8998,6 +10411,12 @@ "EventBatchingCondition":{"shape":"EventBatchingCondition"} } }, + "TwoInputs":{ + "type":"list", + "member":{"shape":"NodeId"}, + "max":2, + "min":2 + }, "TypeString":{ "type":"string", "max":20000, @@ -9022,6 +10441,26 @@ "type":"list", "member":{"shape":"UnfilteredPartition"} }, + "Union":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "UnionType" + ], + "members":{ + "Name":{"shape":"NodeName"}, + "Inputs":{"shape":"TwoInputs"}, + "UnionType":{"shape":"UnionType"} + } + }, + "UnionType":{ + "type":"string", + "enum":[ + "ALL", + "DISTINCT" + ] + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -9063,6 +10502,13 @@ "Name":{"shape":"NameString"} } }, + "UpdateCatalogBehavior":{ + "type":"string", + "enum":[ + "UPDATE_IN_DATABASE", + "LOG" + ] + }, "UpdateClassifierRequest":{ "type":"structure", "members":{ @@ -9426,6 +10872,14 @@ } }, "UpdatedTimestamp":{"type":"string"}, + "UpsertRedshiftTargetOptions":{ + "type":"structure", + "members":{ + "TableLocation":{"shape":"EnclosedInStringProperty"}, + "ConnectionName":{"shape":"EnclosedInStringProperty"}, + "UpsertKeys":{"shape":"EnclosedInStringPropertiesMinOne"} + } + }, "UriString":{"type":"string"}, "UserDefinedFunction":{ "type":"structure", diff --git a/apis/glue/2017-03-31/docs-2.json b/apis/glue/2017-03-31/docs-2.json index 1bcf7e933b4..5926a2bd075 100644 --- a/apis/glue/2017-03-31/docs-2.json +++ b/apis/glue/2017-03-31/docs-2.json @@ -212,17 +212,61 @@ "TriggerUpdate$Actions": "

The actions initiated by this trigger.

" } }, + "AdditionalOptions": { + "base": null, + "refs": { + "JDBCConnectorTarget$AdditionalOptions": "

Additional connection options for the connector.

", + "SparkConnectorSource$AdditionalOptions": "

Additional connection options for the connector.

", + "SparkConnectorTarget$AdditionalOptions": "

Additional connection options for the connector.

" + } + }, "AdditionalPlanOptionsMap": { "base": null, "refs": { "GetPlanRequest$AdditionalPlanOptionsMap": "

A map to hold additional optional key-value parameters.

Currently, these key-value pairs are supported:

" } }, + "AggFunction": { + "base": null, + "refs": { + "AggregateOperation$AggFunc": "

Specifies the aggregation function to apply.

Possible aggregation functions include: avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop

" + } + }, + "Aggregate": { + "base": "

Specifies a transform that groups rows by chosen fields and computes the aggregated value by a specified function.

", + "refs": { + "CodeGenConfigurationNode$Aggregate": "

Specifies a transform that groups rows by chosen fields and computes the aggregated value by a specified function.

" + } + }, + "AggregateOperation": { + "base": "

Specifies the set of parameters needed to perform aggregation in the aggregate transform.

", + "refs": { + "AggregateOperations$member": null + } + }, + "AggregateOperations": { + "base": null, + "refs": { + "Aggregate$Aggs": "

Specifies the aggregate functions to be performed on specified fields.

" + } + }, "AlreadyExistsException": { "base": "

A resource to be created or added already exists.

", "refs": { } }, + "ApplyMapping": { + "base": "

Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.

", + "refs": { + "CodeGenConfigurationNode$ApplyMapping": "

Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.

" + } + }, + "AthenaConnectorSource": { + "base": "

Specifies a connector to an Amazon Athena data source.

", + "refs": { + "CodeGenConfigurationNode$AthenaConnectorSource": "

Specifies a connector to an Amazon Athena data source.

" + } + }, "AttemptCount": { "base": null, "refs": { @@ -273,6 +317,12 @@ "PartitionIndexDescriptor$BackfillErrors": "

A list of errors that can occur when registering partition indexes for an existing table.

" } }, + "BasicCatalogTarget": { + "base": "

Specifies a target that uses a Glue Data Catalog table.

", + "refs": { + "CodeGenConfigurationNode$CatalogTarget": "

Specifies a target that uses a Glue Data Catalog table.

" + } + }, "BatchCreatePartitionRequest": { "base": null, "refs": { @@ -632,6 +682,7 @@ "refs": { "CreateTriggerRequest$StartOnCreation": "

Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is not supported for ON_DEMAND triggers.

", "GetJobRunRequest$PredecessorsIncluded": "

True if a list of predecessor runs should be returned.

", + "S3CsvSource$OptimizePerformance": "

A Boolean value that specifies whether to use the advanced SIMD CSV reader along with Apache Arrow-based columnar memory formats. Only available in Glue version 3.0.

", "UpdateDevEndpointRequest$UpdateEtlLibraries": "

True if the list of custom libraries to be loaded in the development endpoint needs to be updated, or False otherwise.

" } }, @@ -643,6 +694,90 @@ "UpdatePartitionRequest$PartitionValueList": "

List of partition key values that define the partition to update.

" } }, + "BoxedBoolean": { + "base": null, + "refs": { + "CatalogKafkaSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "CatalogKinesisSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "CatalogSchemaChangePolicy$EnableUpdateCatalog": "

Whether to use the specified update behavior when the crawler finds a changed schema.

", + "DirectKafkaSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "DirectKinesisSource$DetectSchema": "

Whether to automatically determine the schema from the incoming data.

", + "DirectSchemaChangePolicy$EnableUpdateCatalog": "

Whether to use the specified update behavior when the crawler finds a changed schema.

", + "FilterExpression$Negated": "

Whether the expression is to be negated.

", + "KinesisStreamingSourceOptions$AddIdleTimeBetweenReads": "

Adds a time delay between two consecutive getRecords operations. The default value is \"False\". This option is only configurable for Glue version 2.0 and above.

", + "KinesisStreamingSourceOptions$AvoidEmptyBatches": "

Avoids creating an empty microbatch job by checking for unread data in the Kinesis data stream before the batch is started. The default value is \"False\".

", + "Mapping$Dropped": "

If true, then the column is removed.

", + "NullCheckBoxList$IsEmpty": "

Specifies that an empty string is considered as a null value.

", + "NullCheckBoxList$IsNullString": "

Specifies that a value spelling out the word 'null' is considered as a null value.

", + "NullCheckBoxList$IsNegOne": "

Specifies that an integer value of -1 is considered as a null value.

", + "S3CsvSource$Recurse": "

If set to true, recursively reads files in all subdirectories under the specified paths.

", + "S3CsvSource$Multiline": "

A Boolean value that specifies whether a single record can span multiple lines. This can occur when a field contains a quoted new-line character. You must set this option to True if any record spans multiple lines. The default value is False, which allows for more aggressive file-splitting during parsing.

", + "S3CsvSource$WithHeader": "

A Boolean value that specifies whether to treat the first line as a header. The default value is False.

", + "S3CsvSource$WriteHeader": "

A Boolean value that specifies whether to write the header to output. The default value is True.

", + "S3CsvSource$SkipFirst": "

A Boolean value that specifies whether to skip the first data line. The default value is False.

", + "S3DirectSourceAdditionalOptions$EnableSamplePath": "

Sets the option to enable a sample path.

", + "S3JsonSource$Recurse": "

If set to true, recursively reads files in all subdirectories under the specified paths.

", + "S3JsonSource$Multiline": "

A Boolean value that specifies whether a single record can span multiple lines. This can occur when a field contains a quoted new-line character. You must set this option to True if any record spans multiple lines. The default value is False, which allows for more aggressive file-splitting during parsing.

", + "S3ParquetSource$Recurse": "

If set to true, recursively reads files in all subdirectories under the specified paths.

" + } + }, + "BoxedDoubleFraction": { + "base": null, + "refs": { + "PIIDetection$SampleFraction": "

Indicates the fraction of the data to sample when scanning for PII entities.

", + "PIIDetection$ThresholdFraction": "

Indicates the fraction of the data that must contain the PII entity in order for a column to be identified as PII data.

" + } + }, + "BoxedLong": { + "base": null, + "refs": { + "S3DirectSourceAdditionalOptions$BoundedSize": "

Sets the upper limit for the target size of the dataset in bytes that will be processed.

", + "S3DirectSourceAdditionalOptions$BoundedFiles": "

Sets the upper limit for the target number of files that will be processed.

", + "S3SourceAdditionalOptions$BoundedSize": "

Sets the upper limit for the target size of the dataset in bytes that will be processed.

", + "S3SourceAdditionalOptions$BoundedFiles": "

Sets the upper limit for the target number of files that will be processed.

" + } + }, + "BoxedNonNegativeInt": { + "base": null, + "refs": { + "KafkaStreamingSourceOptions$NumRetries": "

The number of times to retry before failing to fetch Kafka offsets. The default value is 3.

", + "KafkaStreamingSourceOptions$MinPartitions": "

The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of Spark partitions is equal to the number of Kafka partitions.

", + "KinesisStreamingSourceOptions$NumRetries": "

The maximum number of retries for Kinesis Data Streams API requests. The default value is 3.

", + "S3CsvSource$MaxBand": "

This option controls the duration in milliseconds after which the S3 listing is likely to be consistent. Files with modification timestamps falling within the last maxBand milliseconds are tracked specially when using JobBookmarks to account for Amazon S3 eventual consistency. Most users don't need to set this option. The default is 900000 milliseconds, or 15 minutes.

", + "S3CsvSource$MaxFilesInBand": "

This option specifies the maximum number of files to save from the last maxBand seconds. If this number is exceeded, extra files are skipped and only processed in the next job run.

", + "S3JsonSource$MaxBand": "

This option controls the duration in milliseconds after which the S3 listing is likely to be consistent. Files with modification timestamps falling within the last maxBand milliseconds are tracked specially when using JobBookmarks to account for Amazon S3 eventual consistency. Most users don't need to set this option. The default is 900000 milliseconds, or 15 minutes.

", + "S3JsonSource$MaxFilesInBand": "

This option specifies the maximum number of files to save from the last maxBand seconds. If this number is exceeded, extra files are skipped and only processed in the next job run.

", + "S3ParquetSource$MaxBand": "

This option controls the duration in milliseconds after which the S3 listing is likely to be consistent. Files with modification timestamps falling within the last maxBand milliseconds are tracked specially when using JobBookmarks to account for Amazon S3 eventual consistency. Most users don't need to set this option. The default is 900000 milliseconds, or 15 minutes.

", + "S3ParquetSource$MaxFilesInBand": "

This option specifies the maximum number of files to save from the last maxBand seconds. If this number is exceeded, extra files are skipped and only processed in the next job run.

" + } + }, + "BoxedNonNegativeLong": { + "base": null, + "refs": { + "JDBCConnectorOptions$LowerBound": "

The minimum value of partitionColumn that is used to decide partition stride.

", + "JDBCConnectorOptions$UpperBound": "

The maximum value of partitionColumn that is used to decide partition stride.

", + "JDBCConnectorOptions$NumPartitions": "

The number of partitions. This value, along with lowerBound (inclusive) and upperBound (exclusive), forms partition strides for generated WHERE clause expressions that are used to split the partitionColumn.

", + "KafkaStreamingSourceOptions$PollTimeoutMs": "

The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is 512.

", + "KafkaStreamingSourceOptions$RetryIntervalMs": "

The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is 10.

", + "KafkaStreamingSourceOptions$MaxOffsetsPerTrigger": "

The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across topicPartitions of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.

", + "KinesisStreamingSourceOptions$MaxFetchTimeInMs": "

The maximum time spent in the job executor to fetch a record from the Kinesis data stream per shard, specified in milliseconds (ms). The default value is 1000.

", + "KinesisStreamingSourceOptions$MaxFetchRecordsPerShard": "

The maximum number of records to fetch per shard in the Kinesis data stream. The default value is 100000.

", + "KinesisStreamingSourceOptions$MaxRecordPerRead": "

The maximum number of records to fetch from the Kinesis data stream in each getRecords operation. The default value is 10000.

", + "KinesisStreamingSourceOptions$IdleTimeBetweenReadsInMs": "

The minimum time delay between two consecutive getRecords operations, specified in ms. The default value is 1000. This option is only configurable for Glue version 2.0 and above.

", + "KinesisStreamingSourceOptions$DescribeShardInterval": "

The minimum time interval between two ListShards API calls for your script to consider resharding. The default value is 1s.

", + "KinesisStreamingSourceOptions$RetryIntervalMs": "

The cool-off time period (specified in ms) before retrying the Kinesis Data Streams API call. The default value is 1000.

", + "KinesisStreamingSourceOptions$MaxRetryIntervalMs": "

The maximum cool-off time period (specified in ms) between two retries of a Kinesis Data Streams API call. The default value is 10000.

" + } + }, + "BoxedPositiveInt": { + "base": null, + "refs": { + "CatalogKafkaSource$WindowSize": "

The amount of time to spend processing each micro batch.

", + "CatalogKinesisSource$WindowSize": "

The amount of time to spend processing each micro batch.

", + "DirectKafkaSource$WindowSize": "

The amount of time to spend processing each micro batch.

", + "DirectKinesisSource$WindowSize": "

The amount of time to spend processing each micro batch.

" + } + }, "CancelMLTaskRunRequest": { "base": null, "refs": { @@ -762,6 +897,31 @@ "GetCatalogImportStatusResponse$ImportStatus": "

The status of the specified catalog migration.

" } }, + "CatalogKafkaSource": { + "base": "

Specifies an Apache Kafka data store in the Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$CatalogKafkaSource": "

Specifies an Apache Kafka data store in the Data Catalog.

" + } + }, + "CatalogKinesisSource": { + "base": "

Specifies a Kinesis data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$CatalogKinesisSource": "

Specifies a Kinesis data source in the Glue Data Catalog.

" + } + }, + "CatalogSchemaChangePolicy": { + "base": "

A policy that specifies update behavior for the crawler.

", + "refs": { + "GovernedCatalogTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the governed catalog.

", + "S3CatalogTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the crawler.

" + } + }, + "CatalogSource": { + "base": "

Specifies a data store in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$CatalogSource": "

Specifies a data store in the Glue Data Catalog.

" + } + }, "CatalogTablesList": { "base": null, "refs": { @@ -847,6 +1007,20 @@ "CodeGenNodeArg$Value": "

The value of the argument or property.

" } }, + "CodeGenConfigurationNode": { + "base": "

CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.

", + "refs": { + "CodeGenConfigurationNodes$value": null + } + }, + "CodeGenConfigurationNodes": { + "base": null, + "refs": { + "CreateJobRequest$CodeGenConfigurationNodes": "

The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.
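As a rough sketch of how a caller might pass this DAG through the updated Ruby client (the job name, role, script location, node IDs, and the snake_case hash form below are illustrative assumptions, not values taken from this model):

```ruby
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

# Hypothetical two-node DAG: read a Data Catalog table backed by Amazon S3,
# then write the rows out to an S3 location as JSON.
glue.create_job(
  name: "example-visual-job",
  role: "arn:aws:iam::123456789012:role/ExampleGlueRole",
  command: { name: "glueetl", script_location: "s3://example-bucket/scripts/job.py" },
  code_gen_configuration_nodes: {
    "node-1" => {
      s3_catalog_source: { name: "source", database: "example_db", table: "example_table" }
    },
    "node-2" => {
      s3_direct_target: {
        name: "target",
        inputs: ["node-1"],
        path: "s3://example-bucket/output/",
        format: "json"   # illustrative TargetFormat value
      }
    }
  }
)
```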

", + "Job$CodeGenConfigurationNodes": "

The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.

", + "JobUpdate$CodeGenConfigurationNodes": "

The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.

" + } + }, "CodeGenEdge": { "base": "

Represents a directional edge in a directed acyclic graph (DAG).

", "refs": { @@ -991,6 +1165,7 @@ "base": null, "refs": { "Column$Type": "

The data type of the Column.

", + "GlueStudioSchemaColumn$Type": "

The Hive type for this column in the Glue Studio schema.

", "KeySchemaElement$Type": "

The type of a partition key.

", "SchemaColumn$DataType": "

The type of data in the column.

" } @@ -1030,6 +1205,13 @@ "UpdateSchemaInput$Compatibility": "

The new compatibility setting for the schema.

" } }, + "CompressionType": { + "base": null, + "refs": { + "S3CsvSource$CompressionType": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

", + "S3JsonSource$CompressionType": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

" + } + }, "ConcurrentModificationException": { "base": "

Two processes are trying to modify a resource simultaneously.

", "refs": { @@ -1539,6 +1721,12 @@ "UpdateCsvClassifierRequest$QuoteSymbol": "

A custom symbol to denote what combines content into a single column value. It must be different from the column delimiter.

" } }, + "CustomCode": { + "base": "

Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.

", + "refs": { + "CodeGenConfigurationNode$CustomCode": "

Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.

" + } + }, "CustomEntityType": { "base": "

An object representing a custom pattern for detecting sensitive data across the columns and rows of your structured data.

", "refs": { @@ -1646,6 +1834,12 @@ "UpdateCrawlerRequest$DatabaseName": "

The Glue database where results are stored, such as: arn:aws:daylight:us-east-1::database/sometable/*.

" } }, + "Datatype": { + "base": "

A structure representing the datatype of the value.

", + "refs": { + "NullValueField$Datatype": "

The datatype of the value.

" + } + }, "DateColumnStatisticsData": { "base": "

Defines column statistics supported for timestamp data columns.

", "refs": { @@ -2007,6 +2201,25 @@ "BatchGetDevEndpointsResponse$DevEndpointsNotFound": "

A list of DevEndpoints not found.

" } }, + "DirectKafkaSource": { + "base": "

Specifies an Apache Kafka data store.

", + "refs": { + "CodeGenConfigurationNode$DirectKafkaSource": "

Specifies an Apache Kafka data store.

" + } + }, + "DirectKinesisSource": { + "base": "

Specifies a direct Amazon Kinesis data source.

", + "refs": { + "CodeGenConfigurationNode$DirectKinesisSource": "

Specifies a direct Amazon Kinesis data source.

" + } + }, + "DirectSchemaChangePolicy": { + "base": "

A policy that specifies update behavior for the crawler.

", + "refs": { + "S3DirectTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the crawler.

", + "S3GlueParquetTarget$SchemaChangePolicy": "

A policy that specifies update behavior for the crawler.

" + } + }, "Double": { "base": null, "refs": { @@ -2027,6 +2240,30 @@ "Statement$Progress": "

The code execution progress.

" } }, + "DropDuplicates": { + "base": "

Specifies a transform that removes rows of repeating data from a data set.

", + "refs": { + "CodeGenConfigurationNode$DropDuplicates": "

Specifies a transform that removes rows of repeating data from a data set.

" + } + }, + "DropFields": { + "base": "

Specifies a transform that chooses the data property keys that you want to drop.

", + "refs": { + "CodeGenConfigurationNode$DropFields": "

Specifies a transform that chooses the data property keys that you want to drop.

" + } + }, + "DropNullFields": { + "base": "

Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are \"null\", -1 integers or other placeholders such as zeros, are not automatically recognized as nulls.

", + "refs": { + "CodeGenConfigurationNode$DropNullFields": "

Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio will recognize null objects, but some values such as empty strings, strings that are \"null\", -1 integers or other placeholders such as zeros, are not automatically recognized as nulls.

" + } + }, + "DynamoDBCatalogSource": { + "base": "

Specifies a DynamoDB data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$DynamoDBCatalogSource": null + } + }, "DynamoDBTarget": { "base": "

Specifies an Amazon DynamoDB table to crawl.

", "refs": { @@ -2057,6 +2294,157 @@ "PutResourcePolicyRequest$EnableHybrid": "

If 'TRUE', indicates that you are using both methods to grant cross-account access to Data Catalog resources:

Must be set to 'TRUE' if you have already used the Management Console to grant cross-account access, otherwise the call fails. Default is 'FALSE'.

" } }, + "EnclosedInStringProperties": { + "base": null, + "refs": { + "AggregateOperation$Column": "

Specifies the column on the data set on which the aggregation function will be applied.

", + "FilterValue$Value": "

The value to be associated.

", + "GlueStudioPathList$member": null, + "JDBCConnectorOptions$JobBookmarkKeys": "

The names of the job bookmark keys on which to sort.

", + "Mapping$FromPath": "

The table or column to be modified.

", + "PIIDetection$EntityTypesToDetect": "

Indicates the types of entities the PIIDetection transform will identify as PII data.

PII type entities include: PERSON_NAME, DATE, USA_SNN, EMAIL, USA_ITIN, USA_PASSPORT_NUMBER, PHONE_NUMBER, BANK_ACCOUNT, IP_ADDRESS, MAC_ADDRESS, USA_CPT_CODE, USA_HCPCS_CODE, USA_NATIONAL_DRUG_CODE, USA_MEDICARE_BENEFICIARY_IDENTIFIER, USA_HEALTH_INSURANCE_CLAIM_NUMBER, CREDIT_CARD, USA_NATIONAL_PROVIDER_IDENTIFIER, USA_DEA_NUMBER, USA_DRIVING_LICENSE

", + "RenameField$SourcePath": "

A JSON path to a variable in the data structure for the source data.

", + "RenameField$TargetPath": "

A JSON path to a variable in the data structure for the target data.

", + "S3CsvSource$Paths": "

A list of the Amazon S3 paths to read from.

", + "S3CsvSource$Exclusions": "

A string containing a JSON list of Unix-style glob patterns to exclude. For example, \"[\\\"**.pdf\\\"]\" excludes all PDF files.

", + "S3JsonSource$Paths": "

A list of the Amazon S3 paths to read from.

", + "S3JsonSource$Exclusions": "

A string containing a JSON list of Unix-style glob patterns to exclude. For example, \"[\\\"**.pdf\\\"]\" excludes all PDF files.

", + "S3ParquetSource$Paths": "

A list of the Amazon S3 paths to read from.

", + "S3ParquetSource$Exclusions": "

A string containing a JSON list of Unix-style glob patterns to exclude. For example, \"[\\\"**.pdf\\\"]\" excludes all PDF files.

" + } + }, + "EnclosedInStringPropertiesMinOne": { + "base": null, + "refs": { + "UpsertRedshiftTargetOptions$UpsertKeys": "

The keys used to determine whether to perform an update or insert.

" + } + }, + "EnclosedInStringProperty": { + "base": null, + "refs": { + "AdditionalOptions$key": null, + "AdditionalOptions$value": null, + "AthenaConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", + "AthenaConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", + "AthenaConnectorSource$ConnectionType": "

The type of connection, such as marketplace.athena or custom.athena, designating a connection to an Amazon Athena data store.

", + "AthenaConnectorSource$SchemaName": "

The name of the CloudWatch log group to read from. For example, /aws-glue/jobs/output.

", + "BasicCatalogTarget$Database": "

The database that contains the table you want to use as the target. This database must already exist in the Data Catalog.

", + "BasicCatalogTarget$Table": "

The table that defines the schema of your output data. This table must already exist in the Data Catalog.

", + "CatalogKafkaSource$Table": "

The name of the table in the database to read from.

", + "CatalogKafkaSource$Database": "

The name of the database to read from.

", + "CatalogKinesisSource$Table": "

The name of the table in the database to read from.

", + "CatalogKinesisSource$Database": "

The name of the database to read from.

", + "CatalogSource$Database": "

The name of the database to read from.

", + "CatalogSource$Table": "

The name of the table in the database to read from.

", + "CustomCode$ClassName": "

The name defined for the custom code node class.

", + "DirectSchemaChangePolicy$Table": "

Specifies the table in the database that the schema change policy applies to.

", + "DirectSchemaChangePolicy$Database": "

Specifies the database that the schema change policy applies to.

", + "DynamoDBCatalogSource$Database": "

The name of the database to read from.

", + "DynamoDBCatalogSource$Table": "

The name of the table in the database to read from.

", + "EnclosedInStringProperties$member": null, + "EnclosedInStringPropertiesMinOne$member": null, + "FillMissingValues$ImputedPath": "

A JSON path to a variable in the data structure for the dataset that is imputed.

", + "FillMissingValues$FilledPath": "

A JSON path to a variable in the data structure for the dataset that is filled.

", + "GovernedCatalogSource$Database": "

The database to read from.

", + "GovernedCatalogSource$Table": "

The database table to read from.

", + "GovernedCatalogSource$PartitionPredicate": "

Partitions satisfying this predicate are deleted. Files within the retention period in these partitions are not deleted. Set to \"\" – empty by default.

", + "GovernedCatalogTarget$Table": "

The name of the table in the database to write to.

", + "GovernedCatalogTarget$Database": "

The name of the database to write to.

", + "JDBCConnectorOptions$FilterPredicate": "

Extra condition clause to filter data from source. For example:

BillingCity='Mountain View'

When using a query instead of a table name, you should validate that the query works with the specified filterPredicate.
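A minimal sketch of a JDBCConnectorSource node that applies this kind of filter predicate, written as the snake_case hash the Ruby SDK is assumed to accept; the connection, connector, and table names are placeholders:

```ruby
# Placeholder connection/connector/table names; only the filter predicate
# comes from the description above.
jdbc_source_node = {
  jdbc_connector_source: {
    name: "jdbc-source",
    connection_name: "example-jdbc-connection",
    connector_name: "example-jdbc-connector",
    connection_type: "custom.jdbc",
    connection_table: "invoices",
    additional_options: {
      filter_predicate: "BillingCity='Mountain View'"
    }
  }
}
```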

", + "JDBCConnectorOptions$PartitionColumn": "

The name of an integer column that is used for partitioning. This option works only when it's included with lowerBound, upperBound, and numPartitions. This option works the same way as in the Spark SQL JDBC reader.

", + "JDBCConnectorOptions$JobBookmarkKeysSortOrder": "

Specifies an ascending or descending sort order.

", + "JDBCConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", + "JDBCConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", + "JDBCConnectorSource$ConnectionType": "

The type of connection, such as marketplace.jdbc or custom.jdbc, designating a connection to a JDBC data store.

", + "JDBCConnectorTarget$ConnectionName": "

The name of the connection that is associated with the connector.

", + "JDBCConnectorTarget$ConnectorName": "

The name of a connector that will be used.

", + "JDBCConnectorTarget$ConnectionType": "

The type of connection, such as marketplace.jdbc or custom.jdbc, designating a connection to a JDBC data target.

", + "JoinColumn$From": "

The column to be joined.

", + "KafkaStreamingSourceOptions$BootstrapServers": "

A list of bootstrap server URLs, for example, b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094. This option must be specified in the API call or defined in the table metadata in the Data Catalog.

", + "KafkaStreamingSourceOptions$SecurityProtocol": "

The protocol used to communicate with brokers. The possible values are \"SSL\" or \"PLAINTEXT\".

", + "KafkaStreamingSourceOptions$ConnectionName": "

The name of the connection.

", + "KafkaStreamingSourceOptions$TopicName": "

The topic name as specified in Apache Kafka. You must specify at least one of \"topicName\", \"assign\" or \"subscribePattern\".

", + "KafkaStreamingSourceOptions$Assign": "

The specific TopicPartitions to consume. You must specify at least one of \"topicName\", \"assign\" or \"subscribePattern\".

", + "KafkaStreamingSourceOptions$SubscribePattern": "

A Java regex string that identifies the topic list to subscribe to. You must specify at least one of \"topicName\", \"assign\" or \"subscribePattern\".

", + "KafkaStreamingSourceOptions$Classification": "

An optional classification.

", + "KafkaStreamingSourceOptions$Delimiter": "

Specifies the delimiter character.

", + "KafkaStreamingSourceOptions$StartingOffsets": "

The starting position in the Kafka topic to read data from. The possible values are \"earliest\" or \"latest\". The default value is \"latest\".

", + "KafkaStreamingSourceOptions$EndingOffsets": "

The end point when a batch query is ended. Possible values are either \"latest\" or a JSON string that specifies an ending offset for each TopicPartition.

", + "KinesisStreamingSourceOptions$EndpointUrl": "

The URL of the Kinesis endpoint.

", + "KinesisStreamingSourceOptions$StreamName": "

The name of the Kinesis data stream.

", + "KinesisStreamingSourceOptions$Classification": "

An optional classification.

", + "KinesisStreamingSourceOptions$Delimiter": "

Specifies the delimiter character.

", + "KinesisStreamingSourceOptions$StreamArn": "

The Amazon Resource Name (ARN) of the Kinesis data stream.

", + "KinesisStreamingSourceOptions$RoleArn": "

The Amazon Resource Name (ARN) of the role to assume using AWS Security Token Service (AWS STS). This role must have permissions for describe or read record operations for the Kinesis data stream. You must use this parameter when accessing a data stream in a different account. Used in conjunction with \"awsSTSSessionName\".

", + "KinesisStreamingSourceOptions$RoleSessionName": "

An identifier for the session assuming the role using AWS STS. You must use this parameter when accessing a data stream in a different account. Used in conjunction with \"awsSTSRoleARN\".

", + "Mapping$ToKey": "

The name that the column should have after the apply mapping. It can be the same as FromPath.

", + "Mapping$FromType": "

The type of the data to be modified.

", + "Mapping$ToType": "

The data type that the data is to be modified to.

", + "MicrosoftSQLServerCatalogSource$Database": "

The name of the database to read from.

", + "MicrosoftSQLServerCatalogSource$Table": "

The name of the table in the database to read from.

", + "MicrosoftSQLServerCatalogTarget$Database": "

The name of the database to write to.

", + "MicrosoftSQLServerCatalogTarget$Table": "

The name of the table in the database to write to.

", + "MySQLCatalogSource$Database": "

The name of the database to read from.

", + "MySQLCatalogSource$Table": "

The name of the table in the database to read from.

", + "MySQLCatalogTarget$Database": "

The name of the database to write to.

", + "MySQLCatalogTarget$Table": "

The name of the table in the database to write to.

", + "NullValueField$Value": "

The value of the null placeholder.

", + "OracleSQLCatalogSource$Database": "

The name of the database to read from.

", + "OracleSQLCatalogSource$Table": "

The name of the table in the database to read from.

", + "OracleSQLCatalogTarget$Database": "

The name of the database to write to.

", + "OracleSQLCatalogTarget$Table": "

The name of the table in the database to write to.

", + "PIIDetection$OutputColumnName": "

Indicates the output column name that will contain any entity type detected in that row.

", + "PostgreSQLCatalogSource$Database": "

The name of the database to read from.

", + "PostgreSQLCatalogSource$Table": "

The name of the table in the database to read from.

", + "PostgreSQLCatalogTarget$Database": "

The name of the database to write to.

", + "PostgreSQLCatalogTarget$Table": "

The name of the table in the database to write to.

", + "RedshiftSource$Database": "

The database to read from.

", + "RedshiftSource$Table": "

The database table to read from.

", + "RedshiftSource$RedshiftTmpDir": "

The Amazon S3 path where temporary data can be staged when copying out of the database.

", + "RedshiftSource$TmpDirIAMRole": "

The IAM role with permissions.

", + "RedshiftTarget$Database": "

The name of the database to write to.

", + "RedshiftTarget$Table": "

The name of the table in the database to write to.

", + "RedshiftTarget$RedshiftTmpDir": "

The Amazon S3 path where temporary data can be staged when copying out of the database.

", + "RedshiftTarget$TmpDirIAMRole": "

The IAM role with permissions.

", + "RelationalCatalogSource$Database": "

The name of the database to read from.

", + "RelationalCatalogSource$Table": "

The name of the table in the database to read from.

", + "S3CatalogSource$Database": "

The database to read from.

", + "S3CatalogSource$Table": "

The database table to read from.

", + "S3CatalogSource$PartitionPredicate": "

Partitions satisfying this predicate are deleted. Files within the retention period in these partitions are not deleted. Set to \"\" – empty by default.

", + "S3CatalogTarget$Table": "

The name of the table in the database to write to.

", + "S3CatalogTarget$Database": "

The name of the database to write to.

", + "S3CsvSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", + "S3CsvSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "S3DirectSourceAdditionalOptions$SamplePath": "

If enabled, specifies the sample path.

", + "S3DirectTarget$Path": "

A single Amazon S3 path to write to.

", + "S3DirectTarget$Compression": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

", + "S3GlueParquetTarget$Path": "

A single Amazon S3 path to write to.

", + "S3JsonSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", + "S3JsonSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "S3JsonSource$JsonPath": "

A JsonPath string defining the JSON data.

", + "S3ParquetSource$GroupSize": "

The target group size in bytes. The default is computed based on the input data size and the size of your cluster. When there are fewer than 50,000 input files, \"groupFiles\" must be set to \"inPartition\" for this to take effect.

", + "S3ParquetSource$GroupFiles": "

Grouping files is turned on by default when the input contains more than 50,000 files. To turn on grouping with fewer than 50,000 files, set this parameter to \"inPartition\". To disable grouping when there are more than 50,000 files, set this parameter to \"none\".

", + "SparkConnectorSource$ConnectionName": "

The name of the connection that is associated with the connector.

", + "SparkConnectorSource$ConnectorName": "

The name of a connector that assists with accessing the data store in Glue Studio.

", + "SparkConnectorSource$ConnectionType": "

The type of connection, such as marketplace.spark or custom.spark, designating a connection to an Apache Spark data store.

", + "SparkConnectorTarget$ConnectionName": "

The name of a connection for an Apache Spark connector.

", + "SparkConnectorTarget$ConnectorName": "

The name of an Apache Spark connector.

", + "SparkConnectorTarget$ConnectionType": "

The type of connection, such as marketplace.spark or custom.spark, designating a connection to an Apache Spark data store.

", + "Spigot$Path": "

A path in Amazon S3 where the transform writes a subset of records from the dataset to a JSON file.

", + "UpsertRedshiftTargetOptions$TableLocation": "

The physical location of the Redshift table.

", + "UpsertRedshiftTargetOptions$ConnectionName": "

The name of the connection to use to write to Redshift.

" + } + }, + "EnclosedInStringPropertyWithQuote": { + "base": null, + "refs": { + "AthenaConnectorSource$ConnectionTable": "

The name of the table in the data source.

", + "JDBCConnectorSource$ConnectionTable": "

The name of the table in the data source.

", + "JDBCConnectorTarget$ConnectionTable": "

The name of the table in the data target.

", + "S3CsvSource$Escaper": "

Specifies a character to use for escaping. This option is used only when reading CSV files. The default value is none. If enabled, the character which immediately follows is used as-is, except for a small set of well-known escapes (\\n, \\r, \\t, and \\0).

", + "SqlAlias$Alias": "

A temporary name given to a table, or a column in a table.

" + } + }, "EncryptionAtRest": { "base": "

Specifies the encryption-at-rest configuration for the Data Catalog.

", "refs": { @@ -2170,6 +2558,12 @@ "TaskRunProperties$ExportLabelsTaskRunProperties": "

The configuration properties for an exporting labels task run.

" } }, + "ExtendedString": { + "base": null, + "refs": { + "CustomCode$Code": "

The custom code that is used to perform the data transformation.

" + } + }, "FieldType": { "base": null, "refs": { @@ -2177,12 +2571,66 @@ "MappingEntry$TargetType": "

The target type.

" } }, + "FillMissingValues": { + "base": "

Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.

", + "refs": { + "CodeGenConfigurationNode$FillMissingValues": "

Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.

" + } + }, + "Filter": { + "base": "

Specifies a transform that splits a dataset into two, based on a filter condition.

", + "refs": { + "CodeGenConfigurationNode$Filter": "

Specifies a transform that splits a dataset into two, based on a filter condition.

" + } + }, + "FilterExpression": { + "base": "

Specifies a filter expression.

", + "refs": { + "FilterExpressions$member": null + } + }, + "FilterExpressions": { + "base": null, + "refs": { + "Filter$Filters": "

Specifies a filter expression.

" + } + }, + "FilterLogicalOperator": { + "base": null, + "refs": { + "Filter$LogicalOperator": "

The operator used to filter rows by comparing the key value to a specified value.

" + } + }, + "FilterOperation": { + "base": null, + "refs": { + "FilterExpression$Operation": "

The type of operation to perform in the expression.

" + } + }, "FilterString": { "base": null, "refs": { "GetTablesRequest$Expression": "

A regular expression pattern. If present, only those tables whose names match the pattern are returned.

" } }, + "FilterValue": { + "base": "

Represents a single entry in the list of values for a FilterExpression.

", + "refs": { + "FilterValues$member": null + } + }, + "FilterValueType": { + "base": null, + "refs": { + "FilterValue$Type": "

The type of filter value.

" + } + }, + "FilterValues": { + "base": null, + "refs": { + "FilterExpression$Values": "

A list of filter values.

" + } + }, "FindMatchesMetrics": { "base": "

The evaluation metrics for the find matches algorithm. The quality of your machine learning transform is measured by getting your transform to predict some matches and comparing the results to known matches from the same dataset. The quality metrics are based on a subset of your data, so they are not precise.

", "refs": { @@ -2229,6 +2677,14 @@ "FindMatchesParameters$AccuracyCostTradeoff": "

The value that is selected when tuning your transform for a balance between accuracy and cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0 means a bias purely for accuracy, which typically results in a higher cost, sometimes substantially higher. A value of 0.0 means a bias purely for cost, which results in a less accurate FindMatches transform, sometimes with unacceptable accuracy.

Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.

Cost measures how many compute resources, and thus money, are consumed to run the transform.

" } }, + "GenericLimitedString": { + "base": null, + "refs": { + "Datatype$Id": "

The datatype of the value.

", + "Datatype$Label": "

A label assigned to the datatype.

", + "LimitedStringList$member": null + } + }, "GenericMap": { "base": null, "refs": { @@ -2982,6 +3438,12 @@ "GetResourcePoliciesResponseList$member": null } }, + "GlueRecordType": { + "base": null, + "refs": { + "JDBCDataTypeMapping$value": null + } + }, "GlueResourceArn": { "base": null, "refs": { @@ -3012,6 +3474,60 @@ "UpdateSchemaResponse$SchemaArn": "

The Amazon Resource Name (ARN) of the schema.

" } }, + "GlueSchema": { + "base": "

Specifies a user-defined schema when a schema cannot be determined by AWS Glue.

", + "refs": { + "GlueSchemas$member": null + } + }, + "GlueSchemas": { + "base": null, + "refs": { + "AthenaConnectorSource$OutputSchemas": "

Specifies the data schema for the custom Athena source.

", + "CustomCode$OutputSchemas": "

Specifies the data schema for the custom code transform.

", + "JDBCConnectorSource$OutputSchemas": "

Specifies the data schema for the custom JDBC source.

", + "JDBCConnectorTarget$OutputSchemas": "

Specifies the data schema for the JDBC target.

", + "S3CsvSource$OutputSchemas": "

Specifies the data schema for the S3 CSV source.

", + "S3JsonSource$OutputSchemas": "

Specifies the data schema for the S3 JSON source.

", + "S3ParquetSource$OutputSchemas": "

Specifies the data schema for the S3 Parquet source.

", + "SparkConnectorSource$OutputSchemas": "

Specifies the data schema for the custom Spark source.

", + "SparkConnectorTarget$OutputSchemas": "

Specifies the data schema for the custom Spark target.

", + "SparkSQL$OutputSchemas": "

Specifies the data schema for the SparkSQL transform.

" + } + }, + "GlueStudioColumnNameString": { + "base": null, + "refs": { + "GlueStudioSchemaColumn$Name": "

The name of the column in the Glue Studio schema.

" + } + }, + "GlueStudioPathList": { + "base": null, + "refs": { + "Aggregate$Groups": "

Specifies the fields to group by.

", + "DropFields$Paths": "

A JSON path to a variable in the data structure.

", + "GovernedCatalogTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "JoinColumn$Keys": "

The key of the column to be joined.

", + "Merge$PrimaryKeys": "

The list of primary key fields to match records from the source and staging dynamic frames.

", + "S3CatalogTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "S3DirectTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "S3GlueParquetTarget$PartitionKeys": "

Specifies native partitioning using a sequence of keys.

", + "SelectFields$Paths": "

A JSON path to a variable in the data structure.

", + "SplitFields$Paths": "

A JSON path to a variable in the data structure.

" + } + }, + "GlueStudioSchemaColumn": { + "base": "

Specifies a single column in a Glue schema definition.

", + "refs": { + "GlueStudioSchemaColumnList$member": null + } + }, + "GlueStudioSchemaColumnList": { + "base": null, + "refs": { + "GlueSchema$Columns": "

Specifies the column definitions that make up a Glue schema.

" + } + }, "GlueTable": { "base": "

The database and table in the Glue Data Catalog that is used for input or output data.

", "refs": { @@ -3045,6 +3561,18 @@ "UpdateMLTransformRequest$GlueVersion": "

This value determines which version of Glue this machine learning transform is compatible with. Glue 1.0 is recommended for most customers. If the value is not set, the Glue compatibility defaults to Glue 0.9. For more information, see Glue Versions in the developer guide.

" } }, + "GovernedCatalogSource": { + "base": "

Specifies the data store in the governed Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$GovernedCatalogSource": "

Specifies a data source in a governed Data Catalog.

" + } + }, + "GovernedCatalogTarget": { + "base": "

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$GovernedCatalogTarget": "

Specifies a data target that writes to a governed catalog.

" + } + }, "GrokClassifier": { "base": "

A classifier that uses grok patterns.

", "refs": { @@ -3228,6 +3756,36 @@ "CheckSchemaVersionValidityResponse$Valid": "

Returns true if the schema is valid, and false otherwise.

" } }, + "JDBCConnectorOptions": { + "base": "

Additional connection options for the connector.

", + "refs": { + "JDBCConnectorSource$AdditionalOptions": "

Additional connection options for the connector.

" + } + }, + "JDBCConnectorSource": { + "base": "

Specifies a connector to a JDBC data source.

", + "refs": { + "CodeGenConfigurationNode$JDBCConnectorSource": "

Specifies a connector to a JDBC data source.

" + } + }, + "JDBCConnectorTarget": { + "base": "

Specifies a data target that writes to a JDBC data store using a connector.

", + "refs": { + "CodeGenConfigurationNode$JDBCConnectorTarget": "

Specifies a data target that writes to a JDBC data store using a connector.

" + } + }, + "JDBCDataType": { + "base": null, + "refs": { + "JDBCDataTypeMapping$key": null + } + }, + "JDBCDataTypeMapping": { + "base": null, + "refs": { + "JDBCConnectorOptions$DataTypeMapping": "

Custom data type mapping that builds a mapping from a JDBC data type to a Glue data type. For example, the option \"dataTypeMapping\":{\"FLOAT\":\"STRING\"} maps data fields of JDBC type FLOAT into the Java String type by calling the ResultSet.getString() method of the driver, and uses it to build the Glue record. The ResultSet object is implemented by each driver, so the behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how the driver performs the conversions.
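Expressed as the snake_case hash the Ruby SDK is assumed to accept, the FLOAT-to-STRING mapping from the example above might look like this sketch:

```ruby
# Maps JDBC FLOAT columns to Glue STRING fields, per the example above.
jdbc_additional_options = {
  data_type_mapping: { "FLOAT" => "STRING" }
}
```

Any additional entries in the map follow the same JDBC-type-to-Glue-type pattern.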

" + } + }, "JdbcTarget": { "base": "

Specifies a JDBC data store to crawl.

", "refs": { @@ -3330,6 +3888,30 @@ "UpdateJobRequest$JobUpdate": "

Specifies the values with which to update the job definition.

" } }, + "Join": { + "base": "

Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.

", + "refs": { + "CodeGenConfigurationNode$Join": "

Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.

" + } + }, + "JoinColumn": { + "base": "

Specifies a column to be joined.

", + "refs": { + "JoinColumns$member": null + } + }, + "JoinColumns": { + "base": null, + "refs": { + "Join$Columns": "

A list of the two columns to be joined.

" + } + }, + "JoinType": { + "base": null, + "refs": { + "Join$JoinType": "

Specifies the type of join to be performed on the datasets.

" + } + }, "JsonClassifier": { "base": "

A classifier for JSON content.

", "refs": { @@ -3350,6 +3932,13 @@ "JobBookmarkEntry$JobBookmark": "

The bookmark itself.

" } }, + "KafkaStreamingSourceOptions": { + "base": "

Additional options for streaming.

", + "refs": { + "CatalogKafkaSource$StreamingOptions": "

Specifies the streaming options.

", + "DirectKafkaSource$StreamingOptions": "

Specifies the streaming options.

" + } + }, "KeyList": { "base": null, "refs": { @@ -3374,6 +3963,13 @@ "ParametersMap$key": null } }, + "KinesisStreamingSourceOptions": { + "base": "

Additional options for the Amazon Kinesis streaming data source.

", + "refs": { + "CatalogKinesisSource$StreamingOptions": "

Additional options for the Kinesis streaming data source.

", + "DirectKinesisSource$StreamingOptions": "

Additional options for the Kinesis streaming data source.

" + } + }, "KmsKeyArn": { "base": null, "refs": { @@ -3436,6 +4032,18 @@ "SchemaVersionNumber$LatestVersion": "

The latest version available for the schema.

" } }, + "LimitedPathList": { + "base": null, + "refs": { + "DropDuplicates$Columns": "

The names of the columns to be merged or removed if repeating.

" + } + }, + "LimitedStringList": { + "base": null, + "refs": { + "LimitedPathList$member": null + } + }, "LineageConfiguration": { "base": "

Specifies data lineage configuration settings for the crawler.

", "refs": { @@ -3669,6 +4277,13 @@ "MLUserDataEncryption$MlUserDataEncryptionMode": "

The encryption mode applied to user data. Valid values are:

" } }, + "ManyInputs": { + "base": null, + "refs": { + "CustomCode$Inputs": "

The data inputs identified by their node names.

", + "SparkSQL$Inputs": "

The data inputs identified by their node names. You can associate a table name with each input node to use in the SQL query. The name you choose must meet the Spark SQL naming restrictions.

" + } + }, "MapValue": { "base": null, "refs": { @@ -3678,6 +4293,12 @@ "UpdateDevEndpointRequest$AddArguments": "

The map of arguments to add to the map of arguments used to configure the DevEndpoint.

Valid arguments are:

You can specify a version of Python support for development endpoints by using the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults to Python 2.

" } }, + "Mapping": { + "base": "

Specifies the mapping of data property keys.

", + "refs": { + "Mappings$member": null + } + }, "MappingEntry": { "base": "

Defines a mapping.

", "refs": { @@ -3691,6 +4312,19 @@ "GetPlanRequest$Mapping": "

The list of mappings from a source table to target tables.

" } }, + "Mappings": { + "base": null, + "refs": { + "ApplyMapping$Mapping": "

Specifies the mapping of data property keys in the data source to data property keys in the data target.

", + "Mapping$Children": "

Only applicable to nested data structures. If you want to change the parent structure, but also one of its children, you can fill out this data structure. It is also a Mapping, but its FromPath will be the parent's FromPath plus the FromPath from this structure.

For the children part, suppose you have the structure:

{ \"FromPath\": \"OuterStructure\", \"ToKey\": \"OuterStructure\", \"ToType\": \"Struct\", \"Dropped\": false, \"Chidlren\": [{ \"FromPath\": \"inner\", \"ToKey\": \"inner\", \"ToType\": \"Double\", \"Dropped\": false, }] }

You can specify a Mapping that looks like:

{ \"FromPath\": \"OuterStructure\", \"ToKey\": \"OuterStructure\", \"ToType\": \"Struct\", \"Dropped\": false, \"Chidlren\": [{ \"FromPath\": \"inner\", \"ToKey\": \"inner\", \"ToType\": \"Double\", \"Dropped\": false, }] }

" + } + }, + "MaskValue": { + "base": null, + "refs": { + "PIIDetection$MaskValue": "

Indicates the value that will replace the detected entity.

" + } + }, "MatchCriteria": { "base": null, "refs": { @@ -3721,6 +4355,12 @@ "JobUpdate$MaxRetries": "

The maximum number of times to retry this job if it fails.

" } }, + "Merge": { + "base": "

Specifies a transform that merges a DynamicFrame with a staging DynamicFrame based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.

", + "refs": { + "CodeGenConfigurationNode$Merge": "

Specifies a transform that merges a DynamicFrame with a staging DynamicFrame based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.

" + } + }, "MessagePrefix": { "base": null, "refs": { @@ -3808,6 +4448,18 @@ "RemoveSchemaVersionMetadataResponse$MetadataValue": "

The value of the metadata key.

" } }, + "MicrosoftSQLServerCatalogSource": { + "base": "

Specifies a Microsoft SQL Server data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$MicrosoftSQLServerCatalogSource": "

Specifies a Microsoft SQL Server data source in the Glue Data Catalog.

" + } + }, + "MicrosoftSQLServerCatalogTarget": { + "base": "

Specifies a target that uses Microsoft SQL Server.

", + "refs": { + "CodeGenConfigurationNode$MicrosoftSQLServerCatalogTarget": "

Specifies a target that uses Microsoft SQL Server.

" + } + }, "MillisecondsCount": { "base": null, "refs": { @@ -3826,6 +4478,18 @@ "CrawlerTargets$MongoDBTargets": "

Specifies Amazon DocumentDB or MongoDB targets.

" } }, + "MySQLCatalogSource": { + "base": "

Specifies a MySQL data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$MySQLCatalogSource": "

Specifies a MySQL data source in the Glue Data Catalog.

" + } + }, + "MySQLCatalogTarget": { + "base": "

Specifies a target that uses MySQL.

", + "refs": { + "CodeGenConfigurationNode$MySQLCatalogTarget": "

Specifies a target that uses MySQL.

" + } + }, "NameString": { "base": null, "refs": { @@ -4133,6 +4797,17 @@ "NodeList$member": null } }, + "NodeId": { + "base": null, + "refs": { + "CodeGenConfigurationNodes$key": null, + "ManyInputs$member": null, + "Merge$Source": "

The source DynamicFrame that will be merged with a staging DynamicFrame.

", + "OneInput$member": null, + "SqlAlias$From": "

A table, or a column in a table.

", + "TwoInputs$member": null + } + }, "NodeIdList": { "base": null, "refs": { @@ -4146,6 +4821,61 @@ "WorkflowGraph$Nodes": "

A list of the Glue components that belong to the workflow, represented as nodes.

" } }, + "NodeName": { + "base": null, + "refs": { + "Aggregate$Name": "

The name of the transform node.

", + "ApplyMapping$Name": "

The name of the transform node.

", + "AthenaConnectorSource$Name": "

The name of the data source.

", + "BasicCatalogTarget$Name": "

The name of your data target.

", + "CatalogKafkaSource$Name": "

The name of the data store.

", + "CatalogKinesisSource$Name": "

The name of the data source.

", + "CatalogSource$Name": "

The name of the data store.

", + "CustomCode$Name": "

The name of the transform node.

", + "DirectKafkaSource$Name": "

The name of the data store.

", + "DirectKinesisSource$Name": "

The name of the data source.

", + "DropDuplicates$Name": "

The name of the transform node.

", + "DropFields$Name": "

The name of the transform node.

", + "DropNullFields$Name": "

The name of the transform node.

", + "DynamoDBCatalogSource$Name": "

The name of the data source.

", + "FillMissingValues$Name": "

The name of the transform node.

", + "Filter$Name": "

The name of the transform node.

", + "GovernedCatalogSource$Name": "

The name of the data store.

", + "GovernedCatalogTarget$Name": "

The name of the data target.

", + "JDBCConnectorSource$Name": "

The name of the data source.

", + "JDBCConnectorTarget$Name": "

The name of the data target.

", + "Join$Name": "

The name of the transform node.

", + "Merge$Name": "

The name of the transform node.

", + "MicrosoftSQLServerCatalogSource$Name": "

The name of the data source.

", + "MicrosoftSQLServerCatalogTarget$Name": "

The name of the data target.

", + "MySQLCatalogSource$Name": "

The name of the data source.

", + "MySQLCatalogTarget$Name": "

The name of the data target.

", + "OracleSQLCatalogSource$Name": "

The name of the data source.

", + "OracleSQLCatalogTarget$Name": "

The name of the data target.

", + "PIIDetection$Name": "

The name of the transform node.

", + "PostgreSQLCatalogSource$Name": "

The name of the data source.

", + "PostgreSQLCatalogTarget$Name": "

The name of the data target.

", + "RedshiftSource$Name": "

The name of the Amazon Redshift data store.

", + "RedshiftTarget$Name": "

The name of the data target.

", + "RelationalCatalogSource$Name": "

The name of the data source.

", + "RenameField$Name": "

The name of the transform node.

", + "S3CatalogSource$Name": "

The name of the data store.

", + "S3CatalogTarget$Name": "

The name of the data target.

", + "S3CsvSource$Name": "

The name of the data store.

", + "S3DirectTarget$Name": "

The name of the data target.

", + "S3GlueParquetTarget$Name": "

The name of the data target.

", + "S3JsonSource$Name": "

The name of the data store.

", + "S3ParquetSource$Name": "

The name of the data store.

", + "SelectFields$Name": "

The name of the transform node.

", + "SelectFromCollection$Name": "

The name of the transform node.

", + "SparkConnectorSource$Name": "

The name of the data source.

", + "SparkConnectorTarget$Name": "

The name of the data target.

", + "SparkSQL$Name": "

The name of the transform node.

", + "Spigot$Name": "

The name of the transform node.

", + "SplitFields$Name": "

The name of the transform node.

", + "Union$Name": "

The name of the transform node.

" + } + }, "NodeType": { "base": null, "refs": { @@ -4162,6 +4892,12 @@ "StringColumnStatisticsData$AverageLength": "

The average string length in the column.

" } }, + "NonNegativeInt": { + "base": null, + "refs": { + "SelectFromCollection$Index": "

The index for the DynamicFrame to be selected.

" + } + }, "NonNegativeInteger": { "base": null, "refs": { @@ -4211,6 +4947,24 @@ "NotificationProperty$NotifyDelayAfter": "

After a job run starts, the number of minutes to wait before sending a job run delay notification.

" } }, + "NullCheckBoxList": { + "base": "

Represents whether certain values are recognized as null values for removal.

", + "refs": { + "DropNullFields$NullCheckBoxList": "

A structure that represents whether certain values are recognized as null values for removal.

" + } + }, + "NullValueField": { + "base": "

Represents a custom null value, such as a zero or other value, being used as a null placeholder unique to the dataset.

", + "refs": { + "NullValueFields$member": null + } + }, + "NullValueFields": { + "base": null, + "refs": { + "DropNullFields$NullTextList": "

A structure that specifies a list of NullValueField structures that represent a custom null value such as zero or other value being used as a null placeholder unique to the dataset.

The DropNullFields transform removes custom null values only if both the value of the null placeholder and the datatype match the data.

" + } + }, "NullableBoolean": { "base": null, "refs": { @@ -4282,11 +5036,53 @@ "Workflow$MaxConcurrentRuns": "

You can use this parameter to prevent unwanted multiple updates to data, to control costs, or in some cases, to prevent exceeding the maximum number of concurrent runs of any of the component jobs. If you leave this parameter blank, there is no limit to the number of concurrent workflow runs.

" } }, + "OneInput": { + "base": null, + "refs": { + "Aggregate$Inputs": "

Specifies the fields and rows to use as inputs for the aggregate transform.

", + "ApplyMapping$Inputs": "

The data inputs identified by their node names.

", + "BasicCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "DropDuplicates$Inputs": "

The data inputs identified by their node names.

", + "DropFields$Inputs": "

The data inputs identified by their node names.

", + "DropNullFields$Inputs": "

The data inputs identified by their node names.

", + "FillMissingValues$Inputs": "

The data inputs identified by their node names.

", + "Filter$Inputs": "

The data inputs identified by their node names.

", + "GovernedCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "JDBCConnectorTarget$Inputs": "

The nodes that are inputs to the data target.

", + "MicrosoftSQLServerCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "MySQLCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "OracleSQLCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "PIIDetection$Inputs": "

The node ID inputs to the transform.

", + "PostgreSQLCatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "RedshiftTarget$Inputs": "

The nodes that are inputs to the data target.

", + "RenameField$Inputs": "

The data inputs identified by their node names.

", + "S3CatalogTarget$Inputs": "

The nodes that are inputs to the data target.

", + "S3DirectTarget$Inputs": "

The nodes that are inputs to the data target.

", + "S3GlueParquetTarget$Inputs": "

The nodes that are inputs to the data target.

", + "SelectFields$Inputs": "

The data inputs identified by their node names.

", + "SelectFromCollection$Inputs": "

The data inputs identified by their node names.

", + "SparkConnectorTarget$Inputs": "

The nodes that are inputs to the data target.

", + "Spigot$Inputs": "

The data inputs identified by their node names.

", + "SplitFields$Inputs": "

The data inputs identified by their node names.

" + } + }, "OperationTimeoutException": { "base": "

The operation timed out.

", "refs": { } }, + "OracleSQLCatalogSource": { + "base": "

Specifies an Oracle data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$OracleSQLCatalogSource": "

Specifies an Oracle data source in the Glue Data Catalog.

" + } + }, + "OracleSQLCatalogTarget": { + "base": "

Specifies a target that uses Oracle SQL.

", + "refs": { + "CodeGenConfigurationNode$OracleSQLCatalogTarget": "

Specifies a target that uses Oracle SQL.

" + } + }, "OrchestrationArgumentsMap": { "base": null, "refs": { @@ -4391,6 +5187,12 @@ "OtherMetadataValueList$member": null } }, + "PIIDetection": { + "base": "

Specifies a transform that identifies, removes or masks PII data.

", + "refs": { + "CodeGenConfigurationNode$PIIDetection": "

Specifies a transform that identifies, removes or masks PII data.

" + } + }, "PageSize": { "base": null, "refs": { @@ -4455,6 +5257,13 @@ "ParametersMap$value": null } }, + "ParquetCompressionType": { + "base": null, + "refs": { + "S3GlueParquetTarget$Compression": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

", + "S3ParquetSource$CompressionType": "

Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

" + } + }, "Partition": { "base": "

Represents a slice of table data.

", "refs": { @@ -4594,6 +5403,12 @@ "ConnectionInput$PhysicalConnectionRequirements": "

A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" } }, + "PiiType": { + "base": null, + "refs": { + "PIIDetection$PiiType": "

Indicates the type of PIIDetection transform.

" + } + }, "PolicyJsonString": { "base": null, "refs": { @@ -4602,6 +5417,30 @@ "PutResourcePolicyRequest$PolicyInJson": "

Contains the policy document to set, in JSON format.

" } }, + "PollingTime": { + "base": null, + "refs": { + "StreamingDataPreviewOptions$PollingTime": "

The polling time in milliseconds.

" + } + }, + "PositiveLong": { + "base": null, + "refs": { + "StreamingDataPreviewOptions$RecordPollingLimit": "

The limit to the number of records polled.

" + } + }, + "PostgreSQLCatalogSource": { + "base": "

Specifies a PostgreSQL data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$PostgreSQLCatalogSource": "

Specifies a PostgreSQL data source in the Glue Data Catalog.

" + } + }, + "PostgreSQLCatalogTarget": { + "base": "

Specifies a target that uses PostgreSQL.

", + "refs": { + "CodeGenConfigurationNode$PostgreSQLCatalogTarget": "

Specifies a target that uses PostgreSQL.

" + } + }, "Predecessor": { "base": "

A job run that was used in the predicate of a conditional trigger that triggered this job run.

", "refs": { @@ -4650,6 +5489,12 @@ "UserDefinedFunctionInput$OwnerType": "

The owner type.

" } }, + "Prob": { + "base": null, + "refs": { + "Spigot$Prob": "

The probability (a decimal value with a maximum value of 1) of picking any given record. A value of 1 indicates that each row read from the dataset should be included in the sample output.
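A small sketch of a Spigot node (the node name, input ID, and S3 path are placeholders) that samples roughly 20 percent of the rows:

```ruby
# Writes a sample of the input records to S3 as JSON; see also Topk below,
# which caps the sample at a fixed number of leading records.
spigot_node = {
  spigot: {
    name: "sample-writer",
    inputs: ["node-1"],
    path: "s3://example-bucket/samples/",
    prob: 0.2,   # include each record with probability 0.2
    topk: 100    # illustrative cap on the number of records written
  }
}
```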

" + } + }, "PropertyPredicate": { "base": "

Defines a property predicate.

", "refs": { @@ -4736,6 +5581,12 @@ "refs": { } }, + "QuoteChar": { + "base": null, + "refs": { + "S3CsvSource$QuoteChar": "

Specifies the character to use for quoting. The default is a double quote: '\"'. Set this to -1 to turn off quoting entirely.

" + } + }, "RecordsCount": { "base": null, "refs": { @@ -4759,6 +5610,18 @@ "UpdateCrawlerRequest$RecrawlPolicy": "

A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run.

" } }, + "RedshiftSource": { + "base": "

Specifies an Amazon Redshift data store.

", + "refs": { + "CodeGenConfigurationNode$RedshiftSource": "

Specifies an Amazon Redshift data store.

" + } + }, + "RedshiftTarget": { + "base": "

Specifies a target that uses Amazon Redshift.

", + "refs": { + "CodeGenConfigurationNode$RedshiftTarget": "

Specifies a target that uses Amazon Redshift.

" + } + }, "RegisterSchemaVersionInput": { "base": null, "refs": { @@ -4799,6 +5662,12 @@ "RegistryListItem$Status": "

The status of the registry.

" } }, + "RelationalCatalogSource": { + "base": "

Specifies a relational database data source in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$RelationalCatalogSource": null + } + }, "RemoveSchemaVersionMetadataInput": { "base": null, "refs": { @@ -4809,6 +5678,12 @@ "refs": { } }, + "RenameField": { + "base": "

Specifies a transform that renames a single data property key.

", + "refs": { + "CodeGenConfigurationNode$RenameField": "

Specifies a transform that renames a single data property key.

" + } + }, "ReplaceBoolean": { "base": null, "refs": { @@ -4927,6 +5802,38 @@ "refs": { } }, + "S3CatalogSource": { + "base": "

Specifies an Amazon S3 data store in the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$S3CatalogSource": "

Specifies an Amazon S3 data store in the Glue Data Catalog.

" + } + }, + "S3CatalogTarget": { + "base": "

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

", + "refs": { + "CodeGenConfigurationNode$S3CatalogTarget": "

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

" + } + }, + "S3CsvSource": { + "base": "

Specifies a comma-separated value (CSV) data store stored in Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3CsvSource": "

Specifies a comma-separated value (CSV) data store stored in Amazon S3.

" + } + }, + "S3DirectSourceAdditionalOptions": { + "base": "

Specifies additional connection options for the Amazon S3 data store.

", + "refs": { + "S3CsvSource$AdditionalOptions": "

Specifies additional connection options.

", + "S3JsonSource$AdditionalOptions": "

Specifies additional connection options.

", + "S3ParquetSource$AdditionalOptions": "

Specifies additional connection options.

" + } + }, + "S3DirectTarget": { + "base": "

Specifies a data target that writes to Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3DirectTarget": "

Specifies a data target that writes to Amazon S3.

" + } + }, "S3Encryption": { "base": "

Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.

", "refs": { @@ -4945,6 +5852,31 @@ "S3Encryption$S3EncryptionMode": "

The encryption mode to use for Amazon S3 data.

" } }, + "S3GlueParquetTarget": { + "base": "

Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.

", + "refs": { + "CodeGenConfigurationNode$S3GlueParquetTarget": "

Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.

" + } + }, + "S3JsonSource": { + "base": "

Specifies a JSON data store stored in Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3JsonSource": "

Specifies a JSON data store stored in Amazon S3.

" + } + }, + "S3ParquetSource": { + "base": "

Specifies an Apache Parquet data store stored in Amazon S3.

", + "refs": { + "CodeGenConfigurationNode$S3ParquetSource": "

Specifies an Apache Parquet data store stored in Amazon S3.

" + } + }, + "S3SourceAdditionalOptions": { + "base": "

Specifies additional connection options for the Amazon S3 data store.

", + "refs": { + "GovernedCatalogSource$AdditionalOptions": "

Specifies additional connection options.

", + "S3CatalogSource$AdditionalOptions": "

Specifies additional connection options.

" + } + }, "S3Target": { "base": "

Specifies a data store in Amazon Simple Storage Service (Amazon S3).

", "refs": { @@ -5245,6 +6177,24 @@ "GetUnfilteredPartitionsMetadataRequest$Segment": null } }, + "SelectFields": { + "base": "

Specifies a transform that chooses the data property keys that you want to keep.

", + "refs": { + "CodeGenConfigurationNode$SelectFields": "

Specifies a transform that chooses the data property keys that you want to keep.

" + } + }, + "SelectFromCollection": { + "base": "

Specifies a transform that chooses one DynamicFrame from a collection of DynamicFrames. The output is the selected DynamicFrame.

", + "refs": { + "CodeGenConfigurationNode$SelectFromCollection": "

Specifies a transform that chooses one DynamicFrame from a collection of DynamicFrames. The output is the selected DynamicFrame.

" + } + }, + "Separator": { + "base": null, + "refs": { + "S3CsvSource$Separator": "

Specifies the delimiter character. The default is a comma: \",\", but any other character can be specified.
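A sketch of an S3 CSV source node that overrides the separator and quote character; the path is a placeholder, and the literal option values below simply follow the descriptions in this file rather than an authoritative list of accepted values:

```ruby
csv_source_node = {
  s3_csv_source: {
    name: "csv-source",
    paths: ["s3://example-bucket/input/"],
    separator: "|",     # illustrative non-default delimiter
    quote_char: "\"",   # the documented default double quote
    escaper: "\\"       # optional escape character
  }
}
```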

" + } + }, "SerDeInfo": { "base": "

Information about a serialization/deserialization program (SerDe) that serves as an extractor and loader.

", "refs": { @@ -5315,6 +6265,55 @@ "TransformSortCriteria$SortDirection": "

The sort direction to be used in the sorting criteria that are associated with the machine learning transform.

" } }, + "SparkConnectorSource": { + "base": "

Specifies a connector to an Apache Spark data source.

", + "refs": { + "CodeGenConfigurationNode$SparkConnectorSource": "

Specifies a connector to an Apache Spark data source.

" + } + }, + "SparkConnectorTarget": { + "base": "

Specifies a target that uses an Apache Spark connector.

", + "refs": { + "CodeGenConfigurationNode$SparkConnectorTarget": "

Specifies a target that uses an Apache Spark connector.

" + } + }, + "SparkSQL": { + "base": "

Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame.

", + "refs": { + "CodeGenConfigurationNode$SparkSQL": "

Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame.

" + } + }, + "Spigot": { + "base": "

Specifies a transform that writes samples of the data to an Amazon S3 bucket.

", + "refs": { + "CodeGenConfigurationNode$Spigot": "

Specifies a transform that writes samples of the data to an Amazon S3 bucket.

" + } + }, + "SplitFields": { + "base": "

Specifies a transform that splits data property keys into two DynamicFrames. The output is a collection of DynamicFrames: one with selected data property keys, and one with the remaining data property keys.

", + "refs": { + "CodeGenConfigurationNode$SplitFields": "

Specifies a transform that splits data property keys into two DynamicFrames. The output is a collection of DynamicFrames: one with selected data property keys, and one with the remaining data property keys.

" + } + }, + "SqlAlias": { + "base": "

Represents a single entry in the list of values for SqlAliases.

", + "refs": { + "SqlAliases$member": null + } + }, + "SqlAliases": { + "base": null, + "refs": { + "SparkSQL$SqlAliases": "

A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, suppose you have a data source named \"MyDataSource\". If you specify From as MyDataSource and Alias as SqlName, then in your SQL you can do:

select * from SqlName

and that gets data from MyDataSource.
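
As a sketch of how this might be expressed through the Ruby client shipped in this gem, the following is a hypothetical CreateJob call. It assumes the generated client exposes the new shapes as a code_gen_configuration_nodes parameter with snake_case member names (spark_sql, sql_aliases, from, alias); the node IDs, database, table, role, and script location are placeholders.

  require 'aws-sdk-glue'

  glue = Aws::Glue::Client.new(region: 'us-east-1')

  glue.create_job(
    name: 'visual-sparksql-job',                                  # placeholder
    role: 'arn:aws:iam::111122223333:role/GlueJobRole',           # placeholder
    command: { name: 'glueetl', script_location: 's3://my-bucket/scripts/job.py' },
    glue_version: '3.0',
    code_gen_configuration_nodes: {
      'MyDataSource' => {
        catalog_source: { name: 'MyDataSource', database: 'sales_db', table: 'orders' }
      },
      'SqlTransform' => {
        spark_sql: {
          name: 'SqlTransform',
          inputs: ['MyDataSource'],
          sql_aliases: [{ from: 'MyDataSource', alias: 'SqlName' }],
          sql_query: 'select * from SqlName'
        }
      }
    }
  )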

" + } + }, + "SqlQuery": { + "base": null, + "refs": { + "JDBCConnectorSource$Query": "

The table or SQL query to get the data from. You can specify either ConnectionTable or query, but not both.

", + "SparkSQL$SqlQuery": "

A SQL query that must use Spark SQL syntax and return a single data set.

" + } + }, "StartBlueprintRunRequest": { "base": null, "refs": { @@ -5421,6 +6420,12 @@ "WorkflowRun$StartingEventBatchCondition": "

The batch condition that started the workflow run.

" } }, + "StartingPosition": { + "base": null, + "refs": { + "KinesisStreamingSourceOptions$StartingPosition": "

The starting position in the Kinesis data stream to read data from. The possible values are \"latest\", \"trim_horizon\", or \"earliest\". The default value is \"latest\".

" + } + }, "Statement": { "base": "

The statement or request for a particular action to occur in a session.

", "refs": { @@ -5512,6 +6517,15 @@ "TableInput$StorageDescriptor": "

A storage descriptor containing information about the physical storage of this table.

" } }, + "StreamingDataPreviewOptions": { + "base": "

Specifies options related to data preview for viewing a sample of your data.

", + "refs": { + "CatalogKafkaSource$DataPreviewOptions": "

Specifies options related to data preview for viewing a sample of your data.

", + "CatalogKinesisSource$DataPreviewOptions": "

Additional options for data preview.

", + "DirectKafkaSource$DataPreviewOptions": "

Specifies options related to data preview for viewing a sample of your data.

", + "DirectKinesisSource$DataPreviewOptions": "

Additional options for data preview.

" + } + }, "StringColumnStatisticsData": { "base": "

Defines column statistics supported for character sequence data values.

", "refs": { @@ -5666,6 +6680,12 @@ "TagResourceRequest$TagsToAdd": "

Tags to add to this resource.

" } }, + "TargetFormat": { + "base": null, + "refs": { + "S3DirectTarget$Format": "

Specifies the data output format for the target.

" + } + }, "TaskRun": { "base": "

The sampling parameters that are associated with the machine learning transform.

", "refs": { @@ -5854,6 +6874,12 @@ "SearchTablesResponse$NextToken": "

A continuation token, present if the current list segment is not the last.

" } }, + "Topk": { + "base": null, + "refs": { + "Spigot$Topk": "

Specifies a number of records to write starting from the beginning of the dataset.

" + } + }, "TotalSegmentsInteger": { "base": null, "refs": { @@ -5994,6 +7020,14 @@ "UpdateTriggerRequest$TriggerUpdate": "

The new values with which to update the trigger.

" } }, + "TwoInputs": { + "base": null, + "refs": { + "Join$Inputs": "

The data inputs identified by their node names.

", + "Merge$Inputs": "

The data inputs identified by their node names.

", + "Union$Inputs": "

The node ID inputs to the transform.

" + } + }, "TypeString": { "base": null, "refs": { @@ -6020,6 +7054,18 @@ "GetUnfilteredPartitionsMetadataResponse$UnfilteredPartitions": null } }, + "Union": { + "base": "

Specifies a transform that combines the rows from two or more datasets into a single result.

", + "refs": { + "CodeGenConfigurationNode$Union": "

Specifies a transform that combines the rows from two or more datasets into a single result.

" + } + }, + "UnionType": { + "base": null, + "refs": { + "Union$UnionType": "

Indicates the type of Union transform.

Specify ALL to join all rows from data sources to the resulting DynamicFrame. The resulting union does not remove duplicate rows.

Specify DISTINCT to remove duplicate rows in the resulting DynamicFrame.
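
A hypothetical Ruby fragment of such a node, assuming snake_case member names (name, inputs, union_type) and placeholder node IDs:

  # One entry of a code_gen_configuration_nodes map: a Union of two upstream nodes.
  union_node = {
    union: {
      name: 'CombineOrders',                 # placeholder
      inputs: ['Orders2021', 'Orders2022'],  # placeholder upstream node IDs
      union_type: 'ALL'                      # 'DISTINCT' would drop duplicate rows
    }
  }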

" + } + }, "UntagResourceRequest": { "base": null, "refs": { @@ -6046,6 +7092,13 @@ "refs": { } }, + "UpdateCatalogBehavior": { + "base": null, + "refs": { + "CatalogSchemaChangePolicy$UpdateBehavior": "

The update behavior when the crawler finds a changed schema.

", + "DirectSchemaChangePolicy$UpdateBehavior": "

The update behavior when the crawler finds a changed schema.

" + } + }, "UpdateClassifierRequest": { "base": null, "refs": { @@ -6256,6 +7309,12 @@ "SchemaListItem$UpdatedTime": "

The date and time that a schema was updated.

" } }, + "UpsertRedshiftTargetOptions": { + "base": "

The options to configure an upsert operation when writing to a Redshift target.

", + "refs": { + "RedshiftTarget$UpsertRedshiftOptions": "

The set of options to configure an upsert operation when writing to a Redshift target.

" + } + }, "UriString": { "base": null, "refs": { diff --git a/apis/kms/2014-11-01/docs-2.json b/apis/kms/2014-11-01/docs-2.json index 9f734cbeec2..2634654b0e5 100644 --- a/apis/kms/2014-11-01/docs-2.json +++ b/apis/kms/2014-11-01/docs-2.json @@ -1,33 +1,33 @@ { "version": "2.0", - "service": "Key Management Service

Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS 1.2. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account (root) access key ID and secret key for everyday work with KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the Amazon Web Services Security Token Service to generate temporary security credentials that you can use to sign requests.

All KMS operations require Signature Version 4.

Logging API Requests

KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

", + "service": "Key Management Service

Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.
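
For example, a minimal Ruby client built with the aws-sdk-kms gem from this release looks like the following; the Region is a placeholder and credentials are resolved through the SDK's usual provider chain.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1') # placeholder Region

  # List the first few KMS keys in the account and Region.
  kms.list_keys(limit: 10).keys.each do |key|
    puts key.key_arn
  end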

If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

All KMS API calls must be signed and be transmitted using Transport Layer Security (TLS). KMS recommends you always use the latest supported TLS version. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

Signing Requests

Requests must be signed by using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account (root) access key ID and secret key for everyday work with KMS. Instead, use the access key ID and secret access key for an IAM user. You can also use the Amazon Web Services Security Token Service to generate temporary security credentials that you can use to sign requests.

All KMS operations require Signature Version 4.

Logging API Requests

KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

Additional Resources

For more information about credentials and request signing, see the following:

Commonly Used API Operations

Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

", "operations": { "CancelKeyDeletion": "

Cancels the deletion of a KMS key. When this operation succeeds, the key state of the KMS key is Disabled. To enable the KMS key, use EnableKey.

For more information about scheduling and canceling deletion of a KMS key, see Deleting KMS keys in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:CancelKeyDeletion (key policy)

Related operations: ScheduleKeyDeletion

", "ConnectCustomKeyStore": "

Connects or reconnects a custom key store to its associated CloudHSM cluster.

The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

To connect a custom key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

The connection process can take an extended amount of time to complete; up to 20 minutes. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.
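
A Ruby sketch of that pattern, starting the connection and then polling DescribeCustomKeyStores until the key store reports CONNECTED; the key store ID and polling interval are placeholders.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')
  store_id = 'cks-1234567890abcdef0' # placeholder custom key store ID

  kms.connect_custom_key_store(custom_key_store_id: store_id)

  # ConnectCustomKeyStore returns before the connection finishes, so poll for the state.
  loop do
    store = kms.describe_custom_key_stores(custom_key_store_id: store_id).custom_key_stores.first
    break if store.connection_state == 'CONNECTED'
    raise "Connection failed: #{store.connection_error_code}" if store.connection_state == 'FAILED'
    sleep 30
  end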

During the connection process, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

If you are having trouble connecting or disconnecting a custom key store, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:ConnectCustomKeyStore (IAM policy)

Related operations

", "CreateAlias": "

Creates a friendly name for a KMS key.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Using aliases in the Key Management Service Developer Guide.

This operation does not return a response. To get the alias that you created, use the ListAliases operation.
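
For example, a Ruby sketch that creates an alias and then confirms it with ListAliases; the key ID and alias name are placeholders.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')
  key_id = '1234abcd-12ab-34cd-56ef-1234567890ab' # placeholder key ID

  # Alias names must begin with "alias/" and must not begin with "alias/aws/".
  kms.create_alias(alias_name: 'alias/example-app', target_key_id: key_id)

  # CreateAlias returns no response body, so list the aliases for the key instead.
  kms.list_aliases(key_id: key_id).aliases.each { |a| puts a.alias_name }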

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

", "CreateCustomKeyStore": "

Creates a custom key store that is associated with a CloudHSM cluster that you own and manage.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Before you create the custom key store, you must assemble the required elements, including an CloudHSM cluster that fulfills the requirements for a custom key store. For details about the required elements, see Assemble the Prerequisites in the Key Management Service Developer Guide.

When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM cluster. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

For help with failures, see Troubleshooting a Custom Key Store in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:CreateCustomKeyStore (IAM policy).

Related operations:

", "CreateGrant": "

Adds a grant to a KMS key.

A grant is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (DescribeKey) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for temporary permissions because you can create one, use its permissions, and delete it without changing your key policies or IAM policies.

For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

The CreateGrant operation returns a GrantToken and a GrantId.
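
For example, a minimal Ruby sketch; the key ID, grantee principal, and grant name are placeholders.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')

  resp = kms.create_grant(
    key_id: '1234abcd-12ab-34cd-56ef-1234567890ab',               # placeholder
    grantee_principal: 'arn:aws:iam::111122223333:role/AppRole',  # placeholder
    operations: ['Encrypt', 'Decrypt'],
    name: 'app-temporary-access'                                  # placeholder
  )

  # The grant token can be used immediately; the grant ID identifies the grant
  # later for RetireGrant or RevokeGrant.
  puts resp.grant_token
  puts resp.grant_id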

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:CreateGrant (key policy)

Related operations:

", - "CreateKey": "

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.

In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric KMS key never leaves AWS KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of AWS KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

To create a symmetric encryption KMS key in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

Custom key stores support only symmetric encryption KMS keys. You cannot create an HMAC KMS key or an asymmetric KMS key in a custom key store. For information about custom key stores in KMS see Custom key stores in KMS in the Key Management Service Developer Guide .

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

", - "Decrypt": "

Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Decrypt (key policy)

Related operations:

", + "CreateKey": "

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.

In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

To create different types of KMS keys, use the following guidance:

Symmetric encryption KMS key

To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key.

If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but symmetric encryption KMS keys are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

Asymmetric KMS keys

To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

HMAC KMS key

To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the CreateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

Multi-Region primary keys
Imported key material

To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

To import your own key material, begin by creating a symmetric encryption KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token, and use the public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide.

This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key material into any other type of KMS key.

To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

Custom key store

To create a symmetric encryption KMS key in a custom key store, use the CustomKeyStoreId parameter to specify the custom key store. You must also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

Custom key stores support only symmetric encryption KMS keys. You cannot create an HMAC KMS key or an asymmetric KMS key in a custom key store. For information about custom key stores in KMS, see Custom key stores in KMS in the Key Management Service Developer Guide.
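
The following Ruby sketch shows how this guidance maps onto CreateKey calls for the key types discussed above; descriptions are placeholders, and the HMAC and multi-Region calls assume a Region that supports those key types.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')

  # Symmetric encryption key: the defaults (SYMMETRIC_DEFAULT, ENCRYPT_DECRYPT) apply.
  enc_key = kms.create_key(description: 'Example symmetric encryption key')

  # Asymmetric signing key: KeySpec picks the key material, KeyUsage selects signing.
  sign_key = kms.create_key(key_spec: 'ECC_NIST_P256', key_usage: 'SIGN_VERIFY')

  # HMAC key: KeyUsage must be GENERATE_VERIFY_MAC.
  hmac_key = kms.create_key(key_spec: 'HMAC_256', key_usage: 'GENERATE_VERIFY_MAC')

  # Multi-Region primary key that will use imported key material.
  primary = kms.create_key(origin: 'EXTERNAL', multi_region: true)

  puts enc_key.key_metadata.key_id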

Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

Related operations:

", + "Decrypt": "

Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in a KMS asymmetric KMS key. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.
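
A Ruby sketch of that best practice, always naming the KMS key even for symmetric ciphertext; the key ARN and ciphertext source are placeholders.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')

  # Placeholder: ciphertext returned earlier by Encrypt, GenerateDataKey, or ReEncrypt.
  ciphertext_blob = File.binread('ciphertext.bin')

  resp = kms.decrypt(
    ciphertext_blob: ciphertext_blob,
    key_id: 'arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab' # placeholder
  )
  plaintext = resp.plaintext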

Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM user policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Decrypt (key policy)

Related operations:

", "DeleteAlias": "

Deletes the specified alias.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

", "DeleteCustomKeyStore": "

Deletes a custom key store. This operation does not delete the CloudHSM cluster that is associated with the custom key store, or affect any users or keys in the cluster.

The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes the KMS keys. Then it makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DeleteCustomKeyStore (IAM policy)

Related operations:

", "DeleteImportedKeyMaterial": "

Deletes key material that you previously imported. This operation makes the specified KMS key unusable. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

After you delete key material, you can use ImportKeyMaterial to reimport the same key material into the KMS key.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DeleteImportedKeyMaterial (key policy)

Related operations:

", "DescribeCustomKeyStores": "

Gets information about custom key stores in the account and Region.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

To determine whether the custom key store is connected to its CloudHSM cluster, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you use the DisconnectCustomKeyStore operation to disconnect it. If your custom key store state is CONNECTED but you are having trouble using it, make sure that its associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any.

For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DescribeCustomKeyStores (IAM policy)

Related operations:

", "DescribeKey": "

Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For multi-Region keys, it displays the primary key and all related replica keys.

DescribeKey does not return the following information:

In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:DescribeKey (key policy)

Related operations:

", "DisableKey": "

Sets the state of a KMS key to disabled. This change temporarily prevents use of the KMS key for cryptographic operations.

For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide .

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKey (key policy)

Related operations: EnableKey

", - "DisableKeyRotation": "

Disables automatic rotation of the key material for the specified symmetric encryption KMS key.

You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKeyRotation (key policy)

Related operations:

", + "DisableKeyRotation": "

Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:DisableKeyRotation (key policy)

Related operations:

", "DisconnectCustomKeyStore": "

Disconnects the custom key store from its associated CloudHSM cluster. While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use KMS keys in the custom key store. You can reconnect the custom key store at any time.

While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

If the operation succeeds, it returns a JSON object with no properties.

This operation is part of the Custom Key Store feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store.

Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

Related operations:

", "EnableKey": "

Sets the key state of a KMS key to enabled. This allows you to use the KMS key for cryptographic operations.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKey (key policy)

Related operations: DisableKey

", - "EnableKeyRotation": "

Enables automatic rotation of the key material for the specified symmetric encryption KMS key.

You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKeyRotation (key policy)

Related operations:

", + "EnableKeyRotation": "

Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

When you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation.
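
For example, with a placeholder key ID:

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')
  key_id = '1234abcd-12ab-34cd-56ef-1234567890ab' # placeholder customer managed key

  kms.enable_key_rotation(key_id: key_id)

  # Confirm that annual rotation is now enabled for the key.
  puts kms.get_key_rotation_status(key_id: key_id).key_rotation_enabled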

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys varies.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:EnableKeyRotation (key policy)

Related operations:

", "Encrypt": "

Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

If you use a symmetric encryption KMS key, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.
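
A Ruby sketch of a symmetric Encrypt call that binds an encryption context; the alias, plaintext, and context values are placeholders, and the same context must be supplied to Decrypt.

  require 'aws-sdk-kms'

  kms = Aws::KMS::Client.new(region: 'us-east-1')
  context = { 'department' => 'finance', 'purpose' => 'example' } # placeholder context

  enc = kms.encrypt(
    key_id: 'alias/example-app',                 # placeholder alias
    plaintext: 'sensitive configuration value',  # placeholder data
    encryption_context: context
  )

  # Decrypt fails with InvalidCiphertextException unless the same context is supplied.
  dec = kms.decrypt(ciphertext_blob: enc.ciphertext_blob, encryption_context: context)
  puts dec.plaintext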

If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The algorithm must be compatible with the KMS key type.

When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

The maximum size of the data that you can encrypt varies with the type of KMS key and the encryption algorithm that you choose.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Encrypt (key policy)

Related operations:

", "GenerateDataKey": "

Returns a unique symmetric data key for use outside of KMS. This operation returns a plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted data key with the encrypted data.

To generate a data key, specify the symmetric encryption KMS key that will be used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use the DescribeKey operation. You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

How to use your data key

We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the Amazon Web Services Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

To encrypt data outside of KMS:

  1. Use the GenerateDataKey operation to get a data key.

  2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of KMS. Then erase the plaintext data key from memory.

  3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

To decrypt data outside of KMS:

  1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

  2. Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext data key from memory.
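For illustration, the steps above might look like the following minimal Ruby sketch; the key alias is a placeholder, and a client-side encryption library remains the recommended route:

    require 'aws-sdk-kms'
    require 'openssl'

    kms = Aws::KMS::Client.new
    dk  = kms.generate_data_key(key_id: 'alias/my-app-key', key_spec: 'AES_256')

    # Encrypt locally with the plaintext data key, then let it go out of scope.
    cipher = OpenSSL::Cipher.new('aes-256-gcm').encrypt
    cipher.key = dk.plaintext
    iv = cipher.random_iv
    encrypted = cipher.update('my secret data') + cipher.final
    tag = cipher.auth_tag

    # Store dk.ciphertext_blob (the encrypted data key) alongside encrypted, iv, and tag.

    # Later, to decrypt: recover the data key from KMS, then decrypt locally.
    data_key = kms.decrypt(ciphertext_blob: dk.ciphertext_blob).plaintext
    decipher = OpenSSL::Cipher.new('aes-256-gcm').decrypt
    decipher.key = data_key
    decipher.iv = iv
    decipher.auth_tag = tag
    decipher.update(encrypted) + decipher.final #=> "my secret data"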

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKey (key policy)

Related operations:

", "GenerateDataKeyPair": "

Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.
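A minimal Ruby sketch of this flow, assuming an RSA_2048 key pair and a placeholder KMS key alias (the encrypted private key would normally be stored rather than discarded):

    require 'aws-sdk-kms'
    require 'openssl'

    kms  = Aws::KMS::Client.new
    pair = kms.generate_data_key_pair(key_id: 'alias/my-app-key', key_pair_spec: 'RSA_2048')

    # Sign locally with the plaintext private key (DER-encoded PKCS8), then discard it.
    private_key = OpenSSL::PKey.read(pair.private_key_plaintext)
    signature   = private_key.sign(OpenSSL::Digest.new('SHA256'), 'message to sign')

    # Anyone holding pair.public_key (DER-encoded SubjectPublicKeyInfo) can verify.
    OpenSSL::PKey.read(pair.public_key).verify(OpenSSL::Digest.new('SHA256'), signature, 'message to sign') #=> true

    # Store pair.private_key_ciphertext_blob with your data; use Decrypt to recover the
    # private key when you need to sign or decrypt again later.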

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyPair (key policy)

Related operations:

", "GenerateDataKeyPairWithoutPlaintext": "

Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.
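A brief Ruby sketch of that encrypt-now, decrypt-later pattern (placeholder key alias; an RSA_2048 pair is assumed):

    require 'aws-sdk-kms'
    require 'openssl'

    kms  = Aws::KMS::Client.new
    pair = kms.generate_data_key_pair_without_plaintext(key_id: 'alias/my-app-key',
                                                        key_pair_spec: 'RSA_2048')

    # Encrypt with the public key now; store the result and the encrypted private key.
    public_key = OpenSSL::PKey.read(pair.public_key)
    wrapped    = public_key.public_encrypt('small secret', OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING)

    # Later, when the data must be read, recover the private key with Decrypt.
    private_der = kms.decrypt(ciphertext_blob: pair.private_key_ciphertext_blob).plaintext
    OpenSSL::PKey.read(private_der).private_decrypt(wrapped, OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING)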

To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy)

Related operations:

", - "GenerateDataKeyWithoutPlaintext": "

Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key. It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

Related operations:

", - "GenerateMac": "

Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm computes the HMAC for the message and the key as described in RFC 2104.

You can use the HMAC that this operation generates with the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide .

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateMac (key policy)

Related operations: VerifyMac

", + "GenerateDataKeyWithoutPlaintext": "

Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.
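A minimal Ruby sketch of that split of responsibilities (the key alias is a placeholder):

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    # Component that creates containers: stores only the encrypted data key.
    encrypted_key = kms.generate_data_key_without_plaintext(
      key_id: 'alias/my-app-key', key_spec: 'AES_256'
    ).ciphertext_blob

    # Component that fills containers: decrypts the key only when data must be encrypted,
    # uses the plaintext key locally (for example with OpenSSL AES-GCM), then discards it.
    plaintext_key = kms.decrypt(ciphertext_blob: encrypted_key).plaintext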

To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

Related operations:

", + "GenerateMac": "

Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm computes the HMAC for the message and the key as described in RFC 2104.

You can use the HMAC that this operation generates with the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide .

Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.
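For illustration, a short Ruby sketch pairing GenerateMac with VerifyMac; the HMAC key alias is a placeholder and the timestamp is embedded by convention only:

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    message = 'important message, issued 2022-05-17T18:00:00Z'
    tag = kms.generate_mac(
      key_id: 'alias/my-hmac-key',
      message: message,
      mac_algorithm: 'HMAC_SHA_256'
    ).mac

    # Anyone with kms:VerifyMac on the same key can check the tag later.
    kms.verify_mac(
      key_id: 'alias/my-hmac-key',
      message: message,
      mac_algorithm: 'HMAC_SHA_256',
      mac: tag
    ).mac_valid #=> true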

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GenerateMac (key policy)

Related operations: VerifyMac

", "GenerateRandom": "

Returns a random byte string that is cryptographically secure.

By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster that is associated with a custom key store, specify the custom key store ID.

Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

For more information about entropy and random number generation, see Key Management Service Cryptographic Details.
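A one-call Ruby sketch; the custom key store ID is a placeholder and is only needed when the bytes should come from your CloudHSM cluster:

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    kms.generate_random(number_of_bytes: 32).plaintext   # 32 cryptographically secure random bytes
    # kms.generate_random(number_of_bytes: 32,
    #                     custom_key_store_id: 'cks-1234567890abcdef0').plaintext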

Required permissions: kms:GenerateRandom (IAM policy)

", "GetKeyPolicy": "

Gets a key policy attached to the specified KMS key.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:GetKeyPolicy (key policy)

Related operations: PutKeyPolicy

", - "GetKeyRotationStatus": "

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified KMS key.

You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key. The key rotation status for these KMS keys is always false.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:GetKeyRotationStatus (key policy)

Related operations:

", + "GetKeyRotationStatus": "

Gets a Boolean value that indicates whether automatic rotation of the key material is enabled for the specified KMS key.

When you enable automatic rotation for customer managed KMS keys, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

You can enable (EnableKeyRotation) and disable (DisableKeyRotation) automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.
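For illustration, a minimal Ruby sketch (the key ID is a placeholder):

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    resp = kms.get_key_rotation_status(key_id: '1234abcd-12ab-34cd-56ef-1234567890ab')
    resp.key_rotation_enabled #=> always true for Amazon Web Services managed keys;
                              #   true or false for customer managed symmetric encryption keys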

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:GetKeyRotationStatus (key policy)

Related operations:

", "GetParametersForImport": "

Returns the items you need to import key material into a symmetric encryption KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

This operation returns a public key and an import token. Use the public key to encrypt the symmetric key material. Store the import token to send with a subsequent ImportKeyMaterial request.

You must specify the key ID of the symmetric encryption KMS key into which you will import key material. This KMS key's Origin must be EXTERNAL. You must also specify the wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key material. You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account.

To import key material, you must use the public key and import token from the same response. These items are valid for 24 hours. The expiration date and time appear in the GetParametersForImport response. You cannot use an expired token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.
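A minimal Ruby sketch of this step, assuming RSAES_OAEP_SHA_1 wrapping and placeholder identifiers; the wrapped key material and import token feed the ImportKeyMaterial sketch further below:

    require 'aws-sdk-kms'
    require 'openssl'

    kms    = Aws::KMS::Client.new
    key_id = '1234abcd-12ab-34cd-56ef-1234567890ab'   # KMS key with Origin EXTERNAL (placeholder)

    params = kms.get_parameters_for_import(
      key_id: key_id,
      wrapping_algorithm: 'RSAES_OAEP_SHA_1',
      wrapping_key_spec: 'RSA_2048'
    )

    # Wrap your 256-bit key material with the public key from the response.
    wrapping_key = OpenSSL::PKey.read(params.public_key)
    key_material = OpenSSL::Random.random_bytes(32)
    wrapped      = wrapping_key.public_encrypt(key_material, OpenSSL::PKey::RSA::PKCS1_OAEP_PADDING)

    # Keep params.import_token; both items expire at params.parameters_valid_to.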

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:GetParametersForImport (key policy)

Related operations:

", "GetPublicKey": "

Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS. For details, see Special Considerations for Downloading Public Keys.

To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:GetPublicKey (key policy)

Related operations: CreateKey

", "ImportKeyMaterial": "

Imports key material into an existing symmetric encryption KMS key that was created without key material. After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material.

You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material and then importing key material, see Importing Key Material in the Key Management Service Developer Guide.

Before using this operation, call GetParametersForImport. Its response includes a public key and an import token. Use the public key to encrypt the key material. Then, submit the import token from the same GetParametersForImport response.

When calling this operation, you must specify the following values:

When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key.
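Continuing the GetParametersForImport sketch above (the identifiers remain placeholders), the import call itself might look like:

    kms.import_key_material(
      key_id: key_id,                       # same KMS key with Origin EXTERNAL
      import_token: params.import_token,    # from the same GetParametersForImport response
      encrypted_key_material: wrapped,      # key material wrapped with the downloaded public key
      expiration_model: 'KEY_MATERIAL_DOES_NOT_EXPIRE'
    )
    # On success the key state changes from PendingImport to Enabled.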

If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ImportKeyMaterial (key policy)

Related operations:

", @@ -43,7 +43,7 @@ "RetireGrant": "

Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To identify the grant to retire, use a grant token, or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. The CreateGrant operation returns both values.

This operation can be called by the retiring principal for a grant, by the grantee principal if the grant allows the RetireGrant operation, and by the Amazon Web Services account in which the grant is created. It can also be called by principals to whom permission for retiring a grant is delegated. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web Services account.

Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

Related operations:

", "RevokeGrant": "

Deletes the specified grant. You revoke a grant to terminate the permissions that the grant allows. For more information, see Retiring and revoking grants in the Key Management Service Developer Guide .

When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. For details, see Eventual consistency in the Key Management Service Developer Guide .

For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

Required permissions: kms:RevokeGrant (key policy).

Related operations:

", "ScheduleKeyDeletion": "

Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key.) To prevent the use of a KMS key without deleting it, use DisableKey.
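For illustration, a Ruby sketch of scheduling deletion with the shortest waiting period and then cancelling it (the key ID is a placeholder):

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    resp = kms.schedule_key_deletion(
      key_id: '1234abcd-12ab-34cd-56ef-1234567890ab',
      pending_window_in_days: 7
    )
    resp.key_state     #=> "PendingDeletion"
    resp.deletion_date #=> Time at which KMS deletes the key

    # Before the waiting period ends, deletion can still be cancelled:
    kms.cancel_key_deletion(key_id: '1234abcd-12ab-34cd-56ef-1234567890ab')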

If you schedule deletion of a KMS key from a custom key store, when the waiting period expires, ScheduleKeyDeletion deletes the KMS key from KMS. Then KMS makes a best effort to delete the key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups.

You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:ScheduleKeyDeletion (key policy)

Related operations

", - "Sign": "

Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

Digital signatures are generated and verified by using asymmetric key pair, such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

To use the Sign operation, provide the following information:

When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Sign (key policy)

Related operations: Verify

", + "Sign": "

Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

Digital signatures are generated and verified by using an asymmetric key pair, such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

To use the Sign operation, provide the following information:

When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

Best practices recommend that you limit the time during which any signature is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. Signatures do not include a timestamp, but you can include a timestamp in the signed message to help you detect when it's time to refresh the signature.

To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.
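A minimal Ruby sketch pairing Sign with Verify, assuming an RSA signing key behind a placeholder alias:

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    message   = 'message to sign'
    signature = kms.sign(
      key_id: 'alias/my-signing-key',
      message: message,
      message_type: 'RAW',
      signing_algorithm: 'RSASSA_PSS_SHA_256'
    ).signature

    # Record the key and algorithm; both are required to verify.
    kms.verify(
      key_id: 'alias/my-signing-key',
      message: message,
      message_type: 'RAW',
      signature: signature,
      signing_algorithm: 'RSASSA_PSS_SHA_256'
    ).signature_valid #=> true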

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

Required permissions: kms:Sign (key policy)

Related operations: Verify

", "TagResource": "

Adds or edits tags on a customer managed key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.
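For illustration, a one-call Ruby sketch (key ID and tag values are placeholders):

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    kms.tag_resource(
      key_id: '1234abcd-12ab-34cd-56ef-1234567890ab',
      tags: [
        { tag_key: 'Project',    tag_value: 'Alpha' },  # add or edit a tag
        { tag_key: 'CostCenter', tag_value: '' }        # an empty (null) tag value is allowed
      ]
    )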

You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:TagResource (key policy)

Related operations

", "UntagResource": "

Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions: kms:UntagResource (key policy)

Related operations

", "UpdateAlias": "

Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC in KMS in the Key Management Service Developer Guide.

The current and new KMS key must be the same type (both symmetric or both asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.
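A minimal Ruby sketch of repointing an alias; the alias name and key ID are placeholders, and both keys must have the same type and key usage:

    require 'aws-sdk-kms'
    kms = Aws::KMS::Client.new

    kms.update_alias(
      alias_name: 'alias/my-app-key',                         # existing alias, name unchanged
      target_key_id: '0987dcba-09fe-87dc-65ba-ab0987654321'   # KMS key the alias should now point to
    )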

Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

Required permissions

For details, see Controlling access to aliases in the Key Management Service Developer Guide.

Related operations:

", @@ -730,7 +730,7 @@ } }, "IncorrectKeyException": { - "base": "

The request was rejected because the specified KMS key cannot decrypt the data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must identify the same KMS key that was used to encrypt the ciphertext.

", + "base": "

The request was rejected because the specified KMS key cannot decrypt the data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must identify the same KMS key that was used to encrypt the ciphertext.

", "refs": { } }, @@ -819,12 +819,12 @@ "DisableKeyRequest$KeyId": "

Identifies the KMS key to disable.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", "DisableKeyRotationRequest$KeyId": "

Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", "EnableKeyRequest$KeyId": "

Identifies the KMS key to enable.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", - "EnableKeyRotationRequest$KeyId": "

Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", + "EnableKeyRotationRequest$KeyId": "

Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. The key rotation status of these KMS keys is always false. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

Specify the key ID or key ARN of the KMS key.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

", "EncryptRequest$KeyId": "

Identifies the KMS key to use in the encryption operation. The KMS key must have a KeyUsage of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "EncryptResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that was used to encrypt the plaintext.

", "GenerateDataKeyPairRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the private key in the data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "GenerateDataKeyPairResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that encrypted the private key.

", - "GenerateDataKeyPairWithoutPlaintextRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the private key in the data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", + "GenerateDataKeyPairWithoutPlaintextRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the private key in the data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "GenerateDataKeyPairWithoutPlaintextResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that encrypted the private key.

", "GenerateDataKeyRequest$KeyId": "

Specifies the symmetric encryption KMS key that encrypts the data key. You cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

For example:

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", "GenerateDataKeyResponse$KeyId": "

The Amazon Resource Name (key ARN) of the KMS key that encrypted the data key.

", @@ -898,7 +898,7 @@ "KeySpec": { "base": null, "refs": { - "CreateKeyRequest$KeySpec": "

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit symmetric key for encryption and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

", + "CreateKeyRequest$KeySpec": "

Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit symmetric key for encryption and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm, or kms:SigningAlgorithm in the Key Management Service Developer Guide .

Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

KMS supports the following key specs for KMS keys:

", "GetPublicKeyResponse$KeySpec": "

The type of the public key that was downloaded.

", "KeyMetadata$KeySpec": "

Describes the type of key material in the KMS key.

" } @@ -1081,7 +1081,7 @@ "NullableBooleanType": { "base": null, "refs": { - "CreateKeyRequest$MultiRegion": "

Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key.

For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False.

This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation.

You can create a symmetric or asymmetric multi-Region key, and you can create a multi-Region key with imported key material. However, you cannot create a multi-Region key in a custom key store.

", + "CreateKeyRequest$MultiRegion": "

Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key.

For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False.

This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation.

You can create a multi-Region version of a symmetric encryption KMS key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. However, you cannot create a multi-Region key in a custom key store.

", "KeyMetadata$MultiRegion": "

Indicates whether the KMS key is a multi-Region (True) or regional (False) key. This value is True for multi-Region primary and replica keys and False for regional KMS keys.

For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

" } }, @@ -1104,7 +1104,7 @@ "base": null, "refs": { "KeyMetadata$PendingDeletionWindowInDays": "

The waiting period before the primary key in a multi-Region key is deleted. This waiting period begins when the last of its replica keys is deleted. This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. That indicates that the KMS key is the primary key in a multi-Region key, it is scheduled for deletion, and it still has existing replica keys.

When a single-Region KMS key or a multi-Region replica key is scheduled for deletion, its deletion date is displayed in the DeletionDate field. However, when the primary key in a multi-Region key is scheduled for deletion, its waiting period doesn't begin until all of its replica keys are deleted. This value displays that waiting period. When the last replica key in the multi-Region key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion to PendingDeletion and the deletion date appears in the DeletionDate field.

", - "ScheduleKeyDeletionRequest$PendingWindowInDays": "

The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.

If the KMS key is a multi-Region primary key with replicas, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.

", + "ScheduleKeyDeletionRequest$PendingWindowInDays": "

The waiting period, specified in number of days. After the waiting period ends, KMS deletes the KMS key.

If the KMS key is a multi-Region primary key with replica keys, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

This value is optional. If you include a value, it must be between 7 and 30, inclusive. If you do not include a value, it defaults to 30.

", "ScheduleKeyDeletionResponse$PendingWindowInDays": "

The waiting period before the KMS key is deleted.

If the KMS key is a multi-Region primary key with replicas, the waiting period begins when the last of its replica keys is deleted. Otherwise, the waiting period begins immediately.

" } }, @@ -1140,10 +1140,10 @@ "PolicyType": { "base": null, "refs": { - "CreateKeyRequest$Policy": "

The key policy to attach to the KMS key.

If you provide a key policy, it must meet the following criteria:

If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default Key Policy in the Key Management Service Developer Guide.

The key policy size quota is 32 kilobytes (32768 bytes).

For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

", + "CreateKeyRequest$Policy": "

The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

If you provide a key policy, it must meet the following criteria:

A key policy document must conform to the following rules.

For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

", "GetKeyPolicyResponse$Policy": "

A key policy document in JSON format.

", - "PutKeyPolicyRequest$Policy": "

The key policy to attach to the KMS key.

The key policy must meet the following criteria:

The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, see Resource Quotas in the Key Management Service Developer Guide.

", - "ReplicateKeyRequest$Policy": "

The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

If you provide a key policy, it must meet the following criteria:

", + "PutKeyPolicyRequest$Policy": "

The key policy to attach to the KMS key.

The key policy must meet the following criteria:

A key policy document must conform to the following rules.

", + "ReplicateKeyRequest$Policy": "

The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

If you provide a key policy, it must meet the following criteria:

A key policy document must conform to the following rules.

", "ReplicateKeyResponse$ReplicaPolicy": "

The key policy of the new replica key. The value is a key policy document in JSON format.

" } }, diff --git a/gems/aws-sdk-glue/CHANGELOG.md b/gems/aws-sdk-glue/CHANGELOG.md index 90bac0a3cc8..8d5ad0366f3 100644 --- a/gems/aws-sdk-glue/CHANGELOG.md +++ b/gems/aws-sdk-glue/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.112.0 (2022-05-17) +------------------ + +* Feature - This release adds a new optional parameter called codeGenNodeConfiguration to CRUD job APIs that allows users to manage visual jobs via APIs. The updated CreateJob and UpdateJob will create jobs that can be viewed in Glue Studio as a visual graph. GetJob can be used to get codeGenNodeConfiguration. + 1.111.0 (2022-04-26) ------------------ diff --git a/gems/aws-sdk-glue/VERSION b/gems/aws-sdk-glue/VERSION index d313a193da0..628cac6f078 100644 --- a/gems/aws-sdk-glue/VERSION +++ b/gems/aws-sdk-glue/VERSION @@ -1 +1 @@ -1.111.0 +1.112.0 diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue.rb index 71c4c539876..1bffb253a32 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue.rb @@ -48,6 +48,6 @@ # @!group service module Aws::Glue - GEM_VERSION = '1.111.0' + GEM_VERSION = '1.112.0' end diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb index f5022e0ad87..8c8e3e8a8e7 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb @@ -964,6 +964,486 @@ def batch_get_dev_endpoints(params = {}, options = {}) # resp.jobs[0].security_configuration #=> String # resp.jobs[0].notification_property.notify_delay_after #=> Integer # resp.jobs[0].glue_version #=> String + # resp.jobs[0].code_gen_configuration_nodes #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.schema_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.filter_predicate #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.partition_column #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.lower_bound #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.upper_bound #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.num_partitions #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys_sort_order #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.data_type_mapping #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.data_type_mapping["JDBCDataType"] #=> String, one of "DATE", "STRING", "TIMESTAMP", "INT", "FLOAT", "LONG", "BIGDECIMAL", "BYTE", "SHORT", "DOUBLE" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.query #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.additional_options #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.additional_options["EnclosedInStringProperty"] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.redshift_tmp_dir #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.tmp_dir_iam_role #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.partition_predicate #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.paths[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.compression_type #=> String, one of "gzip", "bzip2" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.exclusions #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.exclusions[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.group_size #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.group_files #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.recurse #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.max_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.max_files_in_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.enable_sample_path #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.sample_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.separator #=> String, one of "comma", "ctrla", "pipe", "semicolon", "tab" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.escaper #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.quote_char #=> String, one of "quote", "quillemet", "single_quote", "disabled" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.multiline #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.with_header #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.write_header #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.skip_first #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.optimize_performance #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns[0].type #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.paths[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.compression_type #=> String, one of "gzip", "bzip2" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.exclusions #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.exclusions[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.group_size #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.group_files #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.recurse #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.max_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.max_files_in_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.enable_sample_path #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.sample_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.json_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.multiline #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.paths[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.compression_type #=> String, one of "snappy", "lzo", "gzip", "uncompressed", "none" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.exclusions #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.exclusions[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.group_size #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.group_files #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.recurse #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.max_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.max_files_in_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.bounded_files #=> Integer + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.enable_sample_path #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.sample_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].relational_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].relational_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].relational_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.additional_options #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.additional_options["EnclosedInStringProperty"] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.additional_options #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.additional_options["EnclosedInStringProperty"] #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.redshift_tmp_dir #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.tmp_dir_iam_role #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.table_location #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.upsert_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.upsert_keys[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys #=> Array + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.compression #=> String, one of "snappy", "lzo", "gzip", "uncompressed", "none" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.compression #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.format #=> String, one of "json", "csv", "avro", "orc", "parquet" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].to_key #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_path #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_path[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].to_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].dropped #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].children #=> Types::Mappings + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.paths[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.paths[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.paths[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.paths[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.source_path #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.source_path[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.target_path #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.target_path[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.topk #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.prob #=> Float + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.join_type #=> String, one of "equijoin", "left", "right", "outer", "leftsemi", "leftanti" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].from #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.paths[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.paths[0][0] #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.index #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.imputed_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.filled_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.logical_operator #=> String, one of "AND", "OR" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].operation #=> String, one of "EQ", "LT", "GT", "LTE", "GTE", "REGEX", "ISNULL" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].negated #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].type #=> String, one of "COLUMNEXTRACTED", "CONSTANT" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].value #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].value[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.code #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.class_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_query #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases[0].from #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases[0].alias #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas #=> Array + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.endpoint_url #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.starting_position #=> String, one of "latest", "trim_horizon", "earliest" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_fetch_time_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_fetch_records_per_shard #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_record_per_read #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.add_idle_time_between_reads #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.idle_time_between_reads_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.describe_shard_interval #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.avoid_empty_batches #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_session_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.bootstrap_servers #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.security_protocol #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.topic_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.assign #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.subscribe_pattern #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.starting_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.ending_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.poll_timeout_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.min_partitions #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.endpoint_url #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.starting_position #=> String, one of "latest", "trim_horizon", "earliest" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_fetch_time_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_fetch_records_per_shard #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_record_per_read #=> 
Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.add_idle_time_between_reads #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.idle_time_between_reads_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.describe_shard_interval #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.avoid_empty_batches #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_session_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.bootstrap_servers #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.security_protocol #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.topic_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.assign #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.subscribe_pattern #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.starting_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.ending_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.poll_timeout_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.num_retries #=> Integer + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.min_partitions #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_empty #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_null_string #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_neg_one #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].value #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].datatype.id #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].datatype.label #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.source #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.primary_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.primary_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.primary_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.union_type #=> String, one of "ALL", "DISTINCT" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.pii_type #=> String, one of "RowAudit", "RowMasking", "ColumnAudit", "ColumnMasking" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.entity_types_to_detect #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.entity_types_to_detect[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.output_column_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.sample_fraction #=> Float + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.threshold_fraction #=> Float + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.mask_value #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.groups #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.groups[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.groups[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].column #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].column[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].agg_func #=> String, one of "avg", "countDistinct", "count", "first", "last", "kurtosis", "max", "min", "skewness", "stddev_samp", "stddev_pop", "sum", "sumDistinct", "var_samp", "var_pop" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.columns[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.columns[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.partition_predicate #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.name #=> String 
+ # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.table #=> String # resp.jobs_not_found #=> Array # resp.jobs_not_found[0] #=> String # @@ -2344,6 +2824,10 @@ def create_dev_endpoint(params = {}, options = {}) # of memory, 128 GB disk), and provides 1 executor per worker. We # recommend this worker type for memory-intensive jobs. 
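[Editor's note - not part of the patch.] The new :code_gen_configuration_nodes option documented in the hunk below accepts the visual-job graph as a hash keyed by node ID. The following is a minimal, illustrative Ruby sketch of how a caller might pass it to create_job once this gem version is installed; the job name, IAM role, S3 paths, and node IDs ("node-1", "node-2") are hypothetical placeholders, and the two-node graph (an S3 CSV source feeding an ApplyMapping transform) is chosen only to match the request syntax shown in this diff.

    require 'aws-sdk-glue' # assumes aws-sdk-glue >= 1.112.0

    glue = Aws::Glue::Client.new(region: 'us-east-1')

    resp = glue.create_job(
      name: 'visual-demo-job',                                  # hypothetical job name
      role: 'arn:aws:iam::123456789012:role/GlueJobRole',       # hypothetical role ARN
      glue_version: '3.0',
      command: { name: 'glueetl', script_location: 's3://example-bucket/scripts/demo.py' },
      code_gen_configuration_nodes: {
        'node-1' => {
          s3_csv_source: {
            name: 'CSV source',
            paths: ['s3://example-bucket/input/'],
            separator: 'comma',    # required; accepts comma, ctrla, pipe, semicolon, tab
            quote_char: 'quote',   # required; accepts quote, quillemet, single_quote, disabled
            with_header: true
          }
        },
        'node-2' => {
          apply_mapping: {
            name: 'Rename id column',
            inputs: ['node-1'],    # consumes the output of node-1
            mapping: [
              { to_key: 'customer_id', from_path: ['id'], from_type: 'string', to_type: 'string' }
            ]
          }
        }
      }
    )
    puts resp.name
    # The stored graph can later be read back, e.g.:
    #   glue.get_job(job_name: 'visual-demo-job').job.code_gen_configuration_nodes

Jobs created this way should render as a visual graph in Glue Studio, per the changelog entry above; the same hash shape appears in the GetJob and BatchGetJobs responses documented in this patch.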
# + # @option params [Hash] :code_gen_configuration_nodes + # The representation of a directed acyclic graph on which both the Glue + # Studio visual component and Glue Studio code generation is based. + # # @return [Types::CreateJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateJobResponse#name #name} => String @@ -2386,6 +2870,674 @@ def create_dev_endpoint(params = {}, options = {}) # glue_version: "GlueVersionString", # number_of_workers: 1, # worker_type: "Standard", # accepts Standard, G.1X, G.2X + # code_gen_configuration_nodes: { + # "NodeId" => { + # athena_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # jdbc_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # }, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # }, + # s3_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # s3_csv_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: 
"EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_json_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_parquet_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # relational_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # dynamo_db_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # jdbc_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => 
"EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, + # }, + # s3_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # s3_glue_parquet_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # s3_direct_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # apply_mapping: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # }, + # select_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # drop_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # rename_field: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source_path: ["EnclosedInStringProperty"], # required + # target_path: ["EnclosedInStringProperty"], # required + # }, + # spigot: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, + # }, + # join: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # 
required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], + # }, + # split_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # select_from_collection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # }, + # fill_missing_values: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # }, + # filter: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # }, + # custom_code: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_sql: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # direct_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # direct_kafka_source: { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # 
max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kafka_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # drop_null_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # }, + # merge: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # union: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # }, + # pii_detection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # required + # output_column_name: "EnclosedInStringProperty", + # sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # }, + # aggregate: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # 
required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # }, + # ], + # }, + # drop_duplicates: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # }, + # governed_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # governed_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # microsoft_sql_server_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # microsoft_sql_server_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # }, + # }, # }) # # @example Response structure @@ -5525,6 +6677,486 @@ def get_dev_endpoints(params = {}, options = {}) # resp.job.security_configuration #=> String # resp.job.notification_property.notify_delay_after #=> Integer # resp.job.glue_version #=> String + # resp.job.code_gen_configuration_nodes #=> Hash + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.connector_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.schema_name #=> 
String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connector_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.filter_predicate #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.partition_column #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.lower_bound #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.upper_bound #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.num_partitions #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys_sort_order #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.data_type_mapping #=> Hash + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.data_type_mapping["JDBCDataType"] #=> String, one of "DATE", "STRING", "TIMESTAMP", "INT", "FLOAT", "LONG", "BIGDECIMAL", "BYTE", "SHORT", "DOUBLE" + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.query #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.connector_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.connection_type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.additional_options #=> Hash + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.additional_options["EnclosedInStringProperty"] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas 
#=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_source.redshift_tmp_dir #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_source.tmp_dir_iam_role #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_source.partition_predicate #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_source.additional_options.bounded_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_source.additional_options.bounded_files #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.paths #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.paths[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.compression_type #=> String, one of "gzip", "bzip2" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.exclusions #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.exclusions[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.group_size #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.group_files #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.recurse #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.max_band #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.max_files_in_band #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.bounded_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.bounded_files #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.enable_sample_path #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.sample_path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.separator #=> String, one of "comma", "ctrla", "pipe", "semicolon", "tab" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.escaper #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.quote_char #=> String, one of "quote", "quillemet", "single_quote", "disabled" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.multiline #=> Boolean + 
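
The request-syntax hash documented above and the `resp.job.code_gen_configuration_nodes` accessors that follow describe the same node shapes. A minimal sketch (not part of the generated documentation) of wiring three of those nodes into a `create_job` call might look like the following; the database, table, bucket, role and job names are all hypothetical:

require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

# Hypothetical three-node DAG: PostgreSQL catalog source -> ApplyMapping -> S3 Parquet target.
# The outer hash keys ("source", "map", "sink") are arbitrary node ids; each node's
# :inputs array refers to the ids of its upstream nodes.
nodes = {
  "source" => {
    postgre_sql_catalog_source: {
      name: "read_orders",
      database: "sales_db",   # assumed Data Catalog database
      table: "orders",        # assumed Data Catalog table
    },
  },
  "map" => {
    apply_mapping: {
      name: "rename_columns",
      inputs: ["source"],
      mapping: [
        { to_key: "order_id", from_path: ["id"], from_type: "int", to_type: "long" },
        { to_key: "total", from_path: ["amount"], from_type: "double", to_type: "double" },
      ],
    },
  },
  "sink" => {
    s3_glue_parquet_target: {
      name: "write_parquet",
      inputs: ["map"],
      path: "s3://example-bucket/orders/",   # hypothetical bucket
      compression: "snappy",
      schema_change_policy: { enable_update_catalog: false },
    },
  },
}

glue.create_job(
  name: "orders-to-parquet",                                    # hypothetical job name
  role: "arn:aws:iam::123456789012:role/GlueJobRole",           # hypothetical role
  command: {
    name: "glueetl",
    script_location: "s3://example-bucket/scripts/orders_to_parquet.py",  # hypothetical location
    python_version: "3",
  },
  glue_version: "3.0",
  worker_type: "G.1X",
  number_of_workers: 2,
  code_gen_configuration_nodes: nodes,
)
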
# resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.with_header #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.write_header #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.skip_first #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.optimize_performance #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.paths #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.paths[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.compression_type #=> String, one of "gzip", "bzip2" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.exclusions #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.exclusions[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.group_size #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.group_files #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.recurse #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.max_band #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.max_files_in_band #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.bounded_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.bounded_files #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.enable_sample_path #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.sample_path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.json_path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.multiline #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.paths #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.paths[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.compression_type #=> String, one of "snappy", "lzo", "gzip", "uncompressed", "none" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.exclusions #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.exclusions[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.group_size #=> String + # 
resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.group_files #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.recurse #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.max_band #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.max_files_in_band #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.bounded_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.bounded_files #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.enable_sample_path #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.sample_path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].relational_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].relational_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].relational_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connector_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.additional_options #=> Hash + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.additional_options["EnclosedInStringProperty"] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.inputs[0] #=> String + # 
resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.connector_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.connection_type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.additional_options #=> Hash + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.additional_options["EnclosedInStringProperty"] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.redshift_tmp_dir #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.tmp_dir_iam_role #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.table_location #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.upsert_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.upsert_keys[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_catalog_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # 
resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.compression #=> String, one of "snappy", "lzo", "gzip", "uncompressed", "none" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.compression #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.format #=> String, one of "json", "csv", "avro", "orc", "parquet" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].to_key #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_path #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_path[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].to_type #=> String + # 
resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].dropped #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].children #=> Types::Mappings + # resp.job.code_gen_configuration_nodes["NodeId"].select_fields.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].select_fields.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].select_fields.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].select_fields.paths #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].select_fields.paths[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].select_fields.paths[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_fields.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_fields.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_fields.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_fields.paths #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_fields.paths[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_fields.paths[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.source_path #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.source_path[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.target_path #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].rename_field.target_path[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spigot.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spigot.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spigot.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spigot.path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spigot.topk #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].spigot.prob #=> Float + # resp.job.code_gen_configuration_nodes["NodeId"].join.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].join.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].join.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].join.join_type #=> String, one of "equijoin", "left", "right", "outer", "leftsemi", "leftanti" + # resp.job.code_gen_configuration_nodes["NodeId"].join.columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].join.columns[0].from #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].join.columns[0].keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].join.columns[0].keys[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].join.columns[0].keys[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].split_fields.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].split_fields.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].split_fields.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].split_fields.paths #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].split_fields.paths[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].split_fields.paths[0][0] #=> 
String + # resp.job.code_gen_configuration_nodes["NodeId"].select_from_collection.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].select_from_collection.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].select_from_collection.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].select_from_collection.index #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].fill_missing_values.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].fill_missing_values.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].fill_missing_values.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].fill_missing_values.imputed_path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].fill_missing_values.filled_path #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].filter.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].filter.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].filter.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].filter.logical_operator #=> String, one of "AND", "OR" + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters[0].operation #=> String, one of "EQ", "LT", "GT", "LTE", "GTE", "REGEX", "ISNULL" + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters[0].negated #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters[0].values #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].type #=> String, one of "COLUMNEXTRACTED", "CONSTANT" + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].value #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].value[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.code #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.class_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.sql_query #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases[0].from #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases[0].alias #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns #=> Array + # 
resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns[0].name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns[0].type #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.window_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.detect_schema #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.endpoint_url #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.classification #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.delimiter #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.starting_position #=> String, one of "latest", "trim_horizon", "earliest" + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_fetch_time_in_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_fetch_records_per_shard #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_record_per_read #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.add_idle_time_between_reads #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.idle_time_between_reads_in_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.describe_shard_interval #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.num_retries #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.retry_interval_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_retry_interval_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.avoid_empty_batches #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_arn #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_arn #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_session_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.polling_time #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.record_polling_limit #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.bootstrap_servers #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.security_protocol #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.topic_name #=> String + # 
resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.assign #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.subscribe_pattern #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.classification #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.delimiter #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.starting_offsets #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.ending_offsets #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.poll_timeout_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.num_retries #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.retry_interval_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.min_partitions #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.window_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.detect_schema #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.polling_time #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.record_polling_limit #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.window_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.detect_schema #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.endpoint_url #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.classification #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.delimiter #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.starting_position #=> String, one of "latest", "trim_horizon", "earliest" + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_fetch_time_in_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_fetch_records_per_shard #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_record_per_read #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.add_idle_time_between_reads #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.idle_time_between_reads_in_ms #=> Integer + # 
resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.describe_shard_interval #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.num_retries #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.retry_interval_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_retry_interval_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.avoid_empty_batches #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_arn #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_arn #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_session_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.polling_time #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.record_polling_limit #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.window_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.detect_schema #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.bootstrap_servers #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.security_protocol #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.connection_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.topic_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.assign #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.subscribe_pattern #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.classification #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.delimiter #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.starting_offsets #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.ending_offsets #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.poll_timeout_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.num_retries #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.retry_interval_ms #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.min_partitions #=> Integer + # 
resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.polling_time #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.record_polling_limit #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_empty #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_null_string #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_neg_one #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].value #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].datatype.id #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].datatype.label #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].merge.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].merge.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].merge.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].merge.source #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].merge.primary_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].merge.primary_keys[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].merge.primary_keys[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].union.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].union.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].union.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].union.union_type #=> String, one of "ALL", "DISTINCT" + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.pii_type #=> String, one of "RowAudit", "RowMasking", "ColumnAudit", "ColumnMasking" + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.entity_types_to_detect #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.entity_types_to_detect[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.output_column_name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.sample_fraction #=> Float + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.threshold_fraction #=> Float + # resp.job.code_gen_configuration_nodes["NodeId"].pii_detection.mask_value #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.groups #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.groups[0] #=> Array + # 
resp.job.code_gen_configuration_nodes["NodeId"].aggregate.groups[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.aggs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].column #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].column[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].agg_func #=> String, one of "avg", "countDistinct", "count", "first", "last", "kurtosis", "max", "min", "skewness", "stddev_samp", "stddev_pop", "sum", "sumDistinct", "var_samp", "var_pop" + # resp.job.code_gen_configuration_nodes["NodeId"].drop_duplicates.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_duplicates.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_duplicates.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].drop_duplicates.columns #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_duplicates.columns[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].drop_duplicates.columns[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys[0] #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys[0][0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_source.partition_predicate #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_source.additional_options.bounded_size #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].governed_catalog_source.additional_options.bounded_files #=> Integer + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.name #=> String + # 
resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.table #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.name #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.inputs #=> Array + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.inputs[0] #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.database #=> String + # resp.job.code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.table #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJob AWS API Documentation # @@ -5750,6 +7382,486 @@ def get_job_runs(params = {}, options = {}) # resp.jobs[0].security_configuration #=> String # resp.jobs[0].notification_property.notify_delay_after #=> Integer # resp.jobs[0].glue_version #=> String + # resp.jobs[0].code_gen_configuration_nodes #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.connection_table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.schema_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas #=> Array + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].athena_connector_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.filter_predicate #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.partition_column #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.lower_bound #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.upper_bound #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.num_partitions #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.job_bookmark_keys_sort_order #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.data_type_mapping #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.additional_options.data_type_mapping["JDBCDataType"] #=> String, one of "DATE", "STRING", "TIMESTAMP", "INT", "FLOAT", "LONG", "BIGDECIMAL", "BYTE", "SHORT", "DOUBLE" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.connection_table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.query #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.additional_options #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.additional_options["EnclosedInStringProperty"] #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.redshift_tmp_dir #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_source.tmp_dir_iam_role #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.partition_predicate #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.paths[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.compression_type #=> String, one of "gzip", "bzip2" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.exclusions #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.exclusions[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.group_size #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.group_files #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.recurse #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.max_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.max_files_in_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.enable_sample_path #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.additional_options.sample_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.separator #=> String, one of "comma", "ctrla", "pipe", "semicolon", "tab" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.escaper #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.quote_char #=> String, one of "quote", "quillemet", "single_quote", "disabled" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.multiline #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.with_header #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.write_header #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.skip_first #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.optimize_performance #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_csv_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.paths[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.compression_type #=> String, one of "gzip", "bzip2" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.exclusions #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.exclusions[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.group_size #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.group_files #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.recurse #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.max_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.max_files_in_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.enable_sample_path #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.additional_options.sample_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.json_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.multiline #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_json_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.paths[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.compression_type #=> 
String, one of "snappy", "lzo", "gzip", "uncompressed", "none" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.exclusions #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.exclusions[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.group_size #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.group_files #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.recurse #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.max_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.max_files_in_band #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.enable_sample_path #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.additional_options.sample_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_parquet_source.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].relational_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].relational_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].relational_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].dynamo_db_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.additional_options #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.additional_options["EnclosedInStringProperty"] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns[0].name #=> String + 
# resp.jobs[0].code_gen_configuration_nodes["NodeId"].jdbc_connector_target.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.connector_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.connection_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.additional_options #=> Hash + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.additional_options["EnclosedInStringProperty"] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_connector_target.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.redshift_tmp_dir #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.tmp_dir_iam_role #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.table_location #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.upsert_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].redshift_target.upsert_redshift_options.upsert_keys[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys[0] #=> Array + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_catalog_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.compression #=> String, one of "snappy", "lzo", "gzip", "uncompressed", "none" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_glue_parquet_target.schema_change_policy.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.compression #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.format #=> String, one of "json", "csv", "avro", "orc", "parquet" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].s3_direct_target.schema_change_policy.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.inputs #=> 
Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].to_key #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_path #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_path[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].from_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].to_type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].dropped #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].apply_mapping.mapping[0].children #=> Types::Mappings + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.paths[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_fields.paths[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.paths[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_fields.paths[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.source_path #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.source_path[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.target_path #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].rename_field.target_path[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.topk #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spigot.prob #=> Float + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.join_type #=> String, one of "equijoin", "left", "right", "outer", "leftsemi", "leftanti" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns #=> Array + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].from #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].join.columns[0].keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.paths #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.paths[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].split_fields.paths[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].select_from_collection.index #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.imputed_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].fill_missing_values.filled_path #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.logical_operator #=> String, one of "AND", "OR" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].operation #=> String, one of "EQ", "LT", "GT", "LTE", "GTE", "REGEX", "ISNULL" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].negated #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].type #=> String, one of "COLUMNEXTRACTED", "CONSTANT" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].value #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].filter.filters[0].values[0].value[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.code #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.class_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns #=> Array + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].custom_code.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_query #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases[0].from #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.sql_aliases[0].alias #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns[0].name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].spark_sql.output_schemas[0].columns[0].type #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.endpoint_url #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.starting_position #=> String, one of "latest", "trim_horizon", "earliest" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_fetch_time_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_fetch_records_per_shard #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_record_per_read #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.add_idle_time_between_reads #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.idle_time_between_reads_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.describe_shard_interval #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.max_retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.avoid_empty_batches #=> Boolean + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.stream_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.streaming_options.role_session_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kinesis_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.bootstrap_servers #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.security_protocol #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.topic_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.assign #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.subscribe_pattern #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.starting_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.ending_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.poll_timeout_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.streaming_options.min_partitions #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].direct_kafka_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.database #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.endpoint_url #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.starting_position #=> String, one of "latest", "trim_horizon", "earliest" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_fetch_time_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_fetch_records_per_shard #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_record_per_read #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.add_idle_time_between_reads #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.idle_time_between_reads_in_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.describe_shard_interval #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.max_retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.avoid_empty_batches #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.stream_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_arn #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.streaming_options.role_session_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kinesis_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.window_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.detect_schema #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.bootstrap_servers #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.security_protocol #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.connection_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.topic_name #=> 
String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.assign #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.subscribe_pattern #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.classification #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.delimiter #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.starting_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.ending_offsets #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.poll_timeout_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.num_retries #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.retry_interval_ms #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.max_offsets_per_trigger #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.streaming_options.min_partitions #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.polling_time #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].catalog_kafka_source.data_preview_options.record_polling_limit #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_empty #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_null_string #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_check_box_list.is_neg_one #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].value #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].datatype.id #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_null_fields.null_text_list[0].datatype.label #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.source #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.primary_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.primary_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].merge.primary_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.inputs[0] #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].union.union_type #=> String, one of "ALL", "DISTINCT" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.pii_type #=> String, one of "RowAudit", "RowMasking", "ColumnAudit", "ColumnMasking" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.entity_types_to_detect #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.entity_types_to_detect[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.output_column_name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.sample_fraction #=> Float + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.threshold_fraction #=> Float + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].pii_detection.mask_value #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.groups #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.groups[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.groups[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].column #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].column[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].aggregate.aggs[0].agg_func #=> String, one of "avg", "countDistinct", "count", "first", "last", "kurtosis", "max", "min", "skewness", "stddev_samp", "stddev_pop", "sum", "sumDistinct", "var_samp", "var_pop" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.columns #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.columns[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].drop_duplicates.columns[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys[0] #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.partition_keys[0][0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.database #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.schema_change_policy.enable_update_catalog #=> Boolean + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_target.schema_change_policy.update_behavior #=> String, one of "UPDATE_IN_DATABASE", "LOG" + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.partition_predicate #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.additional_options.bounded_size #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].governed_catalog_source.additional_options.bounded_files #=> Integer + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_source.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].microsoft_sql_server_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].my_sql_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.inputs[0] #=> String + # 
resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].oracle_sql_catalog_target.table #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.name #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.inputs #=> Array + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.inputs[0] #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.database #=> String + # resp.jobs[0].code_gen_configuration_nodes["NodeId"].postgre_sql_catalog_target.table #=> String # resp.next_token #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobs AWS API Documentation @@ -11519,6 +13631,674 @@ def update_dev_endpoint(params = {}, options = {}) # notify_delay_after: 1, # }, # glue_version: "GlueVersionString", + # code_gen_configuration_nodes: { + # "NodeId" => { + # athena_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # jdbc_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # }, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # }, + # s3_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: 
"EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # s3_csv_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_json_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_parquet_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # relational_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # dynamo_db_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # jdbc_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: 
"ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, + # }, + # s3_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # s3_glue_parquet_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # s3_direct_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # apply_mapping: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # }, + # select_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # drop_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # rename_field: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # 
source_path: ["EnclosedInStringProperty"], # required + # target_path: ["EnclosedInStringProperty"], # required + # }, + # spigot: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, + # }, + # join: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], + # }, + # split_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # select_from_collection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # }, + # fill_missing_values: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # }, + # filter: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # }, + # custom_code: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_sql: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # direct_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # direct_kafka_source: { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: 
"EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kafka_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # drop_null_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # }, + # merge: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # union: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # }, + # pii_detection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # 
required + # output_column_name: "EnclosedInStringProperty", + # sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # }, + # aggregate: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # }, + # ], + # }, + # drop_duplicates: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # }, + # governed_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # governed_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # microsoft_sql_server_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # microsoft_sql_server_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # }, + # }, # }, # }) # @@ -12194,7 +14974,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-glue' - context[:gem_version] = '1.111.0' + context[:gem_version] = '1.112.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb index 34b4291c738..c87127281d3 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb +++ 
b/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb @@ -17,8 +17,15 @@ module ClientApi AccountId = Shapes::StringShape.new(name: 'AccountId') Action = Shapes::StructureShape.new(name: 'Action') ActionList = Shapes::ListShape.new(name: 'ActionList') + AdditionalOptions = Shapes::MapShape.new(name: 'AdditionalOptions') AdditionalPlanOptionsMap = Shapes::MapShape.new(name: 'AdditionalPlanOptionsMap') + AggFunction = Shapes::StringShape.new(name: 'AggFunction') + Aggregate = Shapes::StructureShape.new(name: 'Aggregate') + AggregateOperation = Shapes::StructureShape.new(name: 'AggregateOperation') + AggregateOperations = Shapes::ListShape.new(name: 'AggregateOperations') AlreadyExistsException = Shapes::StructureShape.new(name: 'AlreadyExistsException') + ApplyMapping = Shapes::StructureShape.new(name: 'ApplyMapping') + AthenaConnectorSource = Shapes::StructureShape.new(name: 'AthenaConnectorSource') AttemptCount = Shapes::IntegerShape.new(name: 'AttemptCount') AuditColumnNamesList = Shapes::ListShape.new(name: 'AuditColumnNamesList') AuditContext = Shapes::StructureShape.new(name: 'AuditContext') @@ -27,6 +34,7 @@ module ClientApi BackfillErrorCode = Shapes::StringShape.new(name: 'BackfillErrorCode') BackfillErroredPartitionsList = Shapes::ListShape.new(name: 'BackfillErroredPartitionsList') BackfillErrors = Shapes::ListShape.new(name: 'BackfillErrors') + BasicCatalogTarget = Shapes::StructureShape.new(name: 'BasicCatalogTarget') BatchCreatePartitionRequest = Shapes::StructureShape.new(name: 'BatchCreatePartitionRequest') BatchCreatePartitionResponse = Shapes::StructureShape.new(name: 'BatchCreatePartitionResponse') BatchDeleteConnectionRequest = Shapes::StructureShape.new(name: 'BatchDeleteConnectionRequest') @@ -90,6 +98,12 @@ module ClientApi BooleanNullable = Shapes::BooleanShape.new(name: 'BooleanNullable') BooleanValue = Shapes::BooleanShape.new(name: 'BooleanValue') BoundedPartitionValueList = Shapes::ListShape.new(name: 'BoundedPartitionValueList') + BoxedBoolean = Shapes::BooleanShape.new(name: 'BoxedBoolean') + BoxedDoubleFraction = Shapes::FloatShape.new(name: 'BoxedDoubleFraction') + BoxedLong = Shapes::IntegerShape.new(name: 'BoxedLong') + BoxedNonNegativeInt = Shapes::IntegerShape.new(name: 'BoxedNonNegativeInt') + BoxedNonNegativeLong = Shapes::IntegerShape.new(name: 'BoxedNonNegativeLong') + BoxedPositiveInt = Shapes::IntegerShape.new(name: 'BoxedPositiveInt') CancelMLTaskRunRequest = Shapes::StructureShape.new(name: 'CancelMLTaskRunRequest') CancelMLTaskRunResponse = Shapes::StructureShape.new(name: 'CancelMLTaskRunResponse') CancelStatementRequest = Shapes::StructureShape.new(name: 'CancelStatementRequest') @@ -100,6 +114,10 @@ module ClientApi CatalogGetterPageSize = Shapes::IntegerShape.new(name: 'CatalogGetterPageSize') CatalogIdString = Shapes::StringShape.new(name: 'CatalogIdString') CatalogImportStatus = Shapes::StructureShape.new(name: 'CatalogImportStatus') + CatalogKafkaSource = Shapes::StructureShape.new(name: 'CatalogKafkaSource') + CatalogKinesisSource = Shapes::StructureShape.new(name: 'CatalogKinesisSource') + CatalogSchemaChangePolicy = Shapes::StructureShape.new(name: 'CatalogSchemaChangePolicy') + CatalogSource = Shapes::StructureShape.new(name: 'CatalogSource') CatalogTablesList = Shapes::ListShape.new(name: 'CatalogTablesList') CatalogTarget = Shapes::StructureShape.new(name: 'CatalogTarget') CatalogTargetList = Shapes::ListShape.new(name: 'CatalogTargetList') @@ -113,6 +131,8 @@ module ClientApi CloudWatchEncryptionMode = Shapes::StringShape.new(name: 
'CloudWatchEncryptionMode') CodeGenArgName = Shapes::StringShape.new(name: 'CodeGenArgName') CodeGenArgValue = Shapes::StringShape.new(name: 'CodeGenArgValue') + CodeGenConfigurationNode = Shapes::StructureShape.new(name: 'CodeGenConfigurationNode') + CodeGenConfigurationNodes = Shapes::MapShape.new(name: 'CodeGenConfigurationNodes') CodeGenEdge = Shapes::StructureShape.new(name: 'CodeGenEdge') CodeGenIdentifier = Shapes::StringShape.new(name: 'CodeGenIdentifier') CodeGenNode = Shapes::StructureShape.new(name: 'CodeGenNode') @@ -140,6 +160,7 @@ module ClientApi CommentString = Shapes::StringShape.new(name: 'CommentString') Comparator = Shapes::StringShape.new(name: 'Comparator') Compatibility = Shapes::StringShape.new(name: 'Compatibility') + CompressionType = Shapes::StringShape.new(name: 'CompressionType') ConcurrentModificationException = Shapes::StructureShape.new(name: 'ConcurrentModificationException') ConcurrentRunsExceededException = Shapes::StructureShape.new(name: 'ConcurrentRunsExceededException') Condition = Shapes::StructureShape.new(name: 'Condition') @@ -225,6 +246,7 @@ module ClientApi CsvHeader = Shapes::ListShape.new(name: 'CsvHeader') CsvHeaderOption = Shapes::StringShape.new(name: 'CsvHeaderOption') CsvQuoteSymbol = Shapes::StringShape.new(name: 'CsvQuoteSymbol') + CustomCode = Shapes::StructureShape.new(name: 'CustomCode') CustomEntityType = Shapes::StructureShape.new(name: 'CustomEntityType') CustomEntityTypeNames = Shapes::ListShape.new(name: 'CustomEntityTypeNames') CustomEntityTypes = Shapes::ListShape.new(name: 'CustomEntityTypes') @@ -240,6 +262,7 @@ module ClientApi DatabaseInput = Shapes::StructureShape.new(name: 'DatabaseInput') DatabaseList = Shapes::ListShape.new(name: 'DatabaseList') DatabaseName = Shapes::StringShape.new(name: 'DatabaseName') + Datatype = Shapes::StructureShape.new(name: 'Datatype') DateColumnStatisticsData = Shapes::StructureShape.new(name: 'DateColumnStatisticsData') DecimalColumnStatisticsData = Shapes::StructureShape.new(name: 'DecimalColumnStatisticsData') DecimalNumber = Shapes::StructureShape.new(name: 'DecimalNumber') @@ -302,14 +325,25 @@ module ClientApi DevEndpointList = Shapes::ListShape.new(name: 'DevEndpointList') DevEndpointNameList = Shapes::ListShape.new(name: 'DevEndpointNameList') DevEndpointNames = Shapes::ListShape.new(name: 'DevEndpointNames') + DirectKafkaSource = Shapes::StructureShape.new(name: 'DirectKafkaSource') + DirectKinesisSource = Shapes::StructureShape.new(name: 'DirectKinesisSource') + DirectSchemaChangePolicy = Shapes::StructureShape.new(name: 'DirectSchemaChangePolicy') Double = Shapes::FloatShape.new(name: 'Double') DoubleColumnStatisticsData = Shapes::StructureShape.new(name: 'DoubleColumnStatisticsData') DoubleValue = Shapes::FloatShape.new(name: 'DoubleValue') + DropDuplicates = Shapes::StructureShape.new(name: 'DropDuplicates') + DropFields = Shapes::StructureShape.new(name: 'DropFields') + DropNullFields = Shapes::StructureShape.new(name: 'DropNullFields') + DynamoDBCatalogSource = Shapes::StructureShape.new(name: 'DynamoDBCatalogSource') DynamoDBTarget = Shapes::StructureShape.new(name: 'DynamoDBTarget') DynamoDBTargetList = Shapes::ListShape.new(name: 'DynamoDBTargetList') Edge = Shapes::StructureShape.new(name: 'Edge') EdgeList = Shapes::ListShape.new(name: 'EdgeList') EnableHybridValues = Shapes::StringShape.new(name: 'EnableHybridValues') + EnclosedInStringProperties = Shapes::ListShape.new(name: 'EnclosedInStringProperties') + EnclosedInStringPropertiesMinOne = Shapes::ListShape.new(name: 
'EnclosedInStringPropertiesMinOne') + EnclosedInStringProperty = Shapes::StringShape.new(name: 'EnclosedInStringProperty') + EnclosedInStringPropertyWithQuote = Shapes::StringShape.new(name: 'EnclosedInStringPropertyWithQuote') EncryptionAtRest = Shapes::StructureShape.new(name: 'EncryptionAtRest') EncryptionConfiguration = Shapes::StructureShape.new(name: 'EncryptionConfiguration') EntityNotFoundException = Shapes::StructureShape.new(name: 'EntityNotFoundException') @@ -326,14 +360,25 @@ module ClientApi ExecutionTime = Shapes::IntegerShape.new(name: 'ExecutionTime') ExistCondition = Shapes::StringShape.new(name: 'ExistCondition') ExportLabelsTaskRunProperties = Shapes::StructureShape.new(name: 'ExportLabelsTaskRunProperties') + ExtendedString = Shapes::StringShape.new(name: 'ExtendedString') FieldType = Shapes::StringShape.new(name: 'FieldType') + FillMissingValues = Shapes::StructureShape.new(name: 'FillMissingValues') + Filter = Shapes::StructureShape.new(name: 'Filter') + FilterExpression = Shapes::StructureShape.new(name: 'FilterExpression') + FilterExpressions = Shapes::ListShape.new(name: 'FilterExpressions') + FilterLogicalOperator = Shapes::StringShape.new(name: 'FilterLogicalOperator') + FilterOperation = Shapes::StringShape.new(name: 'FilterOperation') FilterString = Shapes::StringShape.new(name: 'FilterString') + FilterValue = Shapes::StructureShape.new(name: 'FilterValue') + FilterValueType = Shapes::StringShape.new(name: 'FilterValueType') + FilterValues = Shapes::ListShape.new(name: 'FilterValues') FindMatchesMetrics = Shapes::StructureShape.new(name: 'FindMatchesMetrics') FindMatchesParameters = Shapes::StructureShape.new(name: 'FindMatchesParameters') FindMatchesTaskRunProperties = Shapes::StructureShape.new(name: 'FindMatchesTaskRunProperties') FormatString = Shapes::StringShape.new(name: 'FormatString') Generic512CharString = Shapes::StringShape.new(name: 'Generic512CharString') GenericBoundedDouble = Shapes::FloatShape.new(name: 'GenericBoundedDouble') + GenericLimitedString = Shapes::StringShape.new(name: 'GenericLimitedString') GenericMap = Shapes::MapShape.new(name: 'GenericMap') GenericString = Shapes::StringShape.new(name: 'GenericString') GetBlueprintRequest = Shapes::StructureShape.new(name: 'GetBlueprintRequest') @@ -464,10 +509,19 @@ module ClientApi GetWorkflowRunsResponse = Shapes::StructureShape.new(name: 'GetWorkflowRunsResponse') GlueEncryptionException = Shapes::StructureShape.new(name: 'GlueEncryptionException') GluePolicy = Shapes::StructureShape.new(name: 'GluePolicy') + GlueRecordType = Shapes::StringShape.new(name: 'GlueRecordType') GlueResourceArn = Shapes::StringShape.new(name: 'GlueResourceArn') + GlueSchema = Shapes::StructureShape.new(name: 'GlueSchema') + GlueSchemas = Shapes::ListShape.new(name: 'GlueSchemas') + GlueStudioColumnNameString = Shapes::StringShape.new(name: 'GlueStudioColumnNameString') + GlueStudioPathList = Shapes::ListShape.new(name: 'GlueStudioPathList') + GlueStudioSchemaColumn = Shapes::StructureShape.new(name: 'GlueStudioSchemaColumn') + GlueStudioSchemaColumnList = Shapes::ListShape.new(name: 'GlueStudioSchemaColumnList') GlueTable = Shapes::StructureShape.new(name: 'GlueTable') GlueTables = Shapes::ListShape.new(name: 'GlueTables') GlueVersionString = Shapes::StringShape.new(name: 'GlueVersionString') + GovernedCatalogSource = Shapes::StructureShape.new(name: 'GovernedCatalogSource') + GovernedCatalogTarget = Shapes::StructureShape.new(name: 'GovernedCatalogTarget') GrokClassifier = Shapes::StructureShape.new(name: 
'GrokClassifier') GrokPattern = Shapes::StringShape.new(name: 'GrokPattern') HashString = Shapes::StringShape.new(name: 'HashString') @@ -486,6 +540,11 @@ module ClientApi InvalidInputException = Shapes::StructureShape.new(name: 'InvalidInputException') InvalidStateException = Shapes::StructureShape.new(name: 'InvalidStateException') IsVersionValid = Shapes::BooleanShape.new(name: 'IsVersionValid') + JDBCConnectorOptions = Shapes::StructureShape.new(name: 'JDBCConnectorOptions') + JDBCConnectorSource = Shapes::StructureShape.new(name: 'JDBCConnectorSource') + JDBCConnectorTarget = Shapes::StructureShape.new(name: 'JDBCConnectorTarget') + JDBCDataType = Shapes::StringShape.new(name: 'JDBCDataType') + JDBCDataTypeMapping = Shapes::MapShape.new(name: 'JDBCDataTypeMapping') JdbcTarget = Shapes::StructureShape.new(name: 'JdbcTarget') JdbcTargetList = Shapes::ListShape.new(name: 'JdbcTargetList') Job = Shapes::StructureShape.new(name: 'Job') @@ -501,13 +560,19 @@ module ClientApi JobRunList = Shapes::ListShape.new(name: 'JobRunList') JobRunState = Shapes::StringShape.new(name: 'JobRunState') JobUpdate = Shapes::StructureShape.new(name: 'JobUpdate') + Join = Shapes::StructureShape.new(name: 'Join') + JoinColumn = Shapes::StructureShape.new(name: 'JoinColumn') + JoinColumns = Shapes::ListShape.new(name: 'JoinColumns') + JoinType = Shapes::StringShape.new(name: 'JoinType') JsonClassifier = Shapes::StructureShape.new(name: 'JsonClassifier') JsonPath = Shapes::StringShape.new(name: 'JsonPath') JsonValue = Shapes::StringShape.new(name: 'JsonValue') + KafkaStreamingSourceOptions = Shapes::StructureShape.new(name: 'KafkaStreamingSourceOptions') KeyList = Shapes::ListShape.new(name: 'KeyList') KeySchemaElement = Shapes::StructureShape.new(name: 'KeySchemaElement') KeySchemaElementList = Shapes::ListShape.new(name: 'KeySchemaElementList') KeyString = Shapes::StringShape.new(name: 'KeyString') + KinesisStreamingSourceOptions = Shapes::StructureShape.new(name: 'KinesisStreamingSourceOptions') KmsKeyArn = Shapes::StringShape.new(name: 'KmsKeyArn') LabelCount = Shapes::IntegerShape.new(name: 'LabelCount') LabelingSetGenerationTaskRunProperties = Shapes::StructureShape.new(name: 'LabelingSetGenerationTaskRunProperties') @@ -517,6 +582,8 @@ module ClientApi LastCrawlInfo = Shapes::StructureShape.new(name: 'LastCrawlInfo') LastCrawlStatus = Shapes::StringShape.new(name: 'LastCrawlStatus') LatestSchemaVersionBoolean = Shapes::BooleanShape.new(name: 'LatestSchemaVersionBoolean') + LimitedPathList = Shapes::ListShape.new(name: 'LimitedPathList') + LimitedStringList = Shapes::ListShape.new(name: 'LimitedStringList') LineageConfiguration = Shapes::StructureShape.new(name: 'LineageConfiguration') ListBlueprintsRequest = Shapes::StructureShape.new(name: 'ListBlueprintsRequest') ListBlueprintsResponse = Shapes::StructureShape.new(name: 'ListBlueprintsResponse') @@ -559,13 +626,18 @@ module ClientApi MLTransformNotReadyException = Shapes::StructureShape.new(name: 'MLTransformNotReadyException') MLUserDataEncryption = Shapes::StructureShape.new(name: 'MLUserDataEncryption') MLUserDataEncryptionModeString = Shapes::StringShape.new(name: 'MLUserDataEncryptionModeString') + ManyInputs = Shapes::ListShape.new(name: 'ManyInputs') MapValue = Shapes::MapShape.new(name: 'MapValue') + Mapping = Shapes::StructureShape.new(name: 'Mapping') MappingEntry = Shapes::StructureShape.new(name: 'MappingEntry') MappingList = Shapes::ListShape.new(name: 'MappingList') + Mappings = Shapes::ListShape.new(name: 'Mappings') + MaskValue = 
Shapes::StringShape.new(name: 'MaskValue') MatchCriteria = Shapes::ListShape.new(name: 'MatchCriteria') MaxConcurrentRuns = Shapes::IntegerShape.new(name: 'MaxConcurrentRuns') MaxResultsNumber = Shapes::IntegerShape.new(name: 'MaxResultsNumber') MaxRetries = Shapes::IntegerShape.new(name: 'MaxRetries') + Merge = Shapes::StructureShape.new(name: 'Merge') MessagePrefix = Shapes::StringShape.new(name: 'MessagePrefix') MessageString = Shapes::StringShape.new(name: 'MessageString') MetadataInfo = Shapes::StructureShape.new(name: 'MetadataInfo') @@ -574,25 +646,38 @@ module ClientApi MetadataKeyValuePair = Shapes::StructureShape.new(name: 'MetadataKeyValuePair') MetadataList = Shapes::ListShape.new(name: 'MetadataList') MetadataValueString = Shapes::StringShape.new(name: 'MetadataValueString') + MicrosoftSQLServerCatalogSource = Shapes::StructureShape.new(name: 'MicrosoftSQLServerCatalogSource') + MicrosoftSQLServerCatalogTarget = Shapes::StructureShape.new(name: 'MicrosoftSQLServerCatalogTarget') MillisecondsCount = Shapes::IntegerShape.new(name: 'MillisecondsCount') MongoDBTarget = Shapes::StructureShape.new(name: 'MongoDBTarget') MongoDBTargetList = Shapes::ListShape.new(name: 'MongoDBTargetList') + MySQLCatalogSource = Shapes::StructureShape.new(name: 'MySQLCatalogSource') + MySQLCatalogTarget = Shapes::StructureShape.new(name: 'MySQLCatalogTarget') NameString = Shapes::StringShape.new(name: 'NameString') NameStringList = Shapes::ListShape.new(name: 'NameStringList') NoScheduleException = Shapes::StructureShape.new(name: 'NoScheduleException') Node = Shapes::StructureShape.new(name: 'Node') + NodeId = Shapes::StringShape.new(name: 'NodeId') NodeIdList = Shapes::ListShape.new(name: 'NodeIdList') NodeList = Shapes::ListShape.new(name: 'NodeList') + NodeName = Shapes::StringShape.new(name: 'NodeName') NodeType = Shapes::StringShape.new(name: 'NodeType') NonNegativeDouble = Shapes::FloatShape.new(name: 'NonNegativeDouble') + NonNegativeInt = Shapes::IntegerShape.new(name: 'NonNegativeInt') NonNegativeInteger = Shapes::IntegerShape.new(name: 'NonNegativeInteger') NonNegativeLong = Shapes::IntegerShape.new(name: 'NonNegativeLong') NotificationProperty = Shapes::StructureShape.new(name: 'NotificationProperty') NotifyDelayAfter = Shapes::IntegerShape.new(name: 'NotifyDelayAfter') + NullCheckBoxList = Shapes::StructureShape.new(name: 'NullCheckBoxList') + NullValueField = Shapes::StructureShape.new(name: 'NullValueField') + NullValueFields = Shapes::ListShape.new(name: 'NullValueFields') NullableBoolean = Shapes::BooleanShape.new(name: 'NullableBoolean') NullableDouble = Shapes::FloatShape.new(name: 'NullableDouble') NullableInteger = Shapes::IntegerShape.new(name: 'NullableInteger') + OneInput = Shapes::ListShape.new(name: 'OneInput') OperationTimeoutException = Shapes::StructureShape.new(name: 'OperationTimeoutException') + OracleSQLCatalogSource = Shapes::StructureShape.new(name: 'OracleSQLCatalogSource') + OracleSQLCatalogTarget = Shapes::StructureShape.new(name: 'OracleSQLCatalogTarget') OrchestrationArgumentsMap = Shapes::MapShape.new(name: 'OrchestrationArgumentsMap') OrchestrationArgumentsValue = Shapes::StringShape.new(name: 'OrchestrationArgumentsValue') OrchestrationIAMRoleArn = Shapes::StringShape.new(name: 'OrchestrationIAMRoleArn') @@ -606,10 +691,12 @@ module ClientApi OrderList = Shapes::ListShape.new(name: 'OrderList') OtherMetadataValueList = Shapes::ListShape.new(name: 'OtherMetadataValueList') OtherMetadataValueListItem = Shapes::StructureShape.new(name: 
'OtherMetadataValueListItem') + PIIDetection = Shapes::StructureShape.new(name: 'PIIDetection') PageSize = Shapes::IntegerShape.new(name: 'PageSize') PaginationToken = Shapes::StringShape.new(name: 'PaginationToken') ParametersMap = Shapes::MapShape.new(name: 'ParametersMap') ParametersMapValue = Shapes::StringShape.new(name: 'ParametersMapValue') + ParquetCompressionType = Shapes::StringShape.new(name: 'ParquetCompressionType') Partition = Shapes::StructureShape.new(name: 'Partition') PartitionError = Shapes::StructureShape.new(name: 'PartitionError') PartitionErrors = Shapes::ListShape.new(name: 'PartitionErrors') @@ -630,7 +717,12 @@ module ClientApi PermissionTypeList = Shapes::ListShape.new(name: 'PermissionTypeList') PermissionTypeMismatchException = Shapes::StructureShape.new(name: 'PermissionTypeMismatchException') PhysicalConnectionRequirements = Shapes::StructureShape.new(name: 'PhysicalConnectionRequirements') + PiiType = Shapes::StringShape.new(name: 'PiiType') PolicyJsonString = Shapes::StringShape.new(name: 'PolicyJsonString') + PollingTime = Shapes::IntegerShape.new(name: 'PollingTime') + PositiveLong = Shapes::IntegerShape.new(name: 'PositiveLong') + PostgreSQLCatalogSource = Shapes::StructureShape.new(name: 'PostgreSQLCatalogSource') + PostgreSQLCatalogTarget = Shapes::StructureShape.new(name: 'PostgreSQLCatalogTarget') Predecessor = Shapes::StructureShape.new(name: 'Predecessor') PredecessorList = Shapes::ListShape.new(name: 'PredecessorList') Predicate = Shapes::StructureShape.new(name: 'Predicate') @@ -638,6 +730,7 @@ module ClientApi PrincipalPermissions = Shapes::StructureShape.new(name: 'PrincipalPermissions') PrincipalPermissionsList = Shapes::ListShape.new(name: 'PrincipalPermissionsList') PrincipalType = Shapes::StringShape.new(name: 'PrincipalType') + Prob = Shapes::FloatShape.new(name: 'Prob') PropertyPredicate = Shapes::StructureShape.new(name: 'PropertyPredicate') PublicKeysList = Shapes::ListShape.new(name: 'PublicKeysList') PutDataCatalogEncryptionSettingsRequest = Shapes::StructureShape.new(name: 'PutDataCatalogEncryptionSettingsRequest') @@ -653,17 +746,22 @@ module ClientApi QuerySchemaVersionMetadataInput = Shapes::StructureShape.new(name: 'QuerySchemaVersionMetadataInput') QuerySchemaVersionMetadataMaxResults = Shapes::IntegerShape.new(name: 'QuerySchemaVersionMetadataMaxResults') QuerySchemaVersionMetadataResponse = Shapes::StructureShape.new(name: 'QuerySchemaVersionMetadataResponse') + QuoteChar = Shapes::StringShape.new(name: 'QuoteChar') RecordsCount = Shapes::IntegerShape.new(name: 'RecordsCount') RecrawlBehavior = Shapes::StringShape.new(name: 'RecrawlBehavior') RecrawlPolicy = Shapes::StructureShape.new(name: 'RecrawlPolicy') + RedshiftSource = Shapes::StructureShape.new(name: 'RedshiftSource') + RedshiftTarget = Shapes::StructureShape.new(name: 'RedshiftTarget') RegisterSchemaVersionInput = Shapes::StructureShape.new(name: 'RegisterSchemaVersionInput') RegisterSchemaVersionResponse = Shapes::StructureShape.new(name: 'RegisterSchemaVersionResponse') RegistryId = Shapes::StructureShape.new(name: 'RegistryId') RegistryListDefinition = Shapes::ListShape.new(name: 'RegistryListDefinition') RegistryListItem = Shapes::StructureShape.new(name: 'RegistryListItem') RegistryStatus = Shapes::StringShape.new(name: 'RegistryStatus') + RelationalCatalogSource = Shapes::StructureShape.new(name: 'RelationalCatalogSource') RemoveSchemaVersionMetadataInput = Shapes::StructureShape.new(name: 'RemoveSchemaVersionMetadataInput') RemoveSchemaVersionMetadataResponse = 
Shapes::StructureShape.new(name: 'RemoveSchemaVersionMetadataResponse') + RenameField = Shapes::StructureShape.new(name: 'RenameField') ReplaceBoolean = Shapes::BooleanShape.new(name: 'ReplaceBoolean') ResetJobBookmarkRequest = Shapes::StructureShape.new(name: 'ResetJobBookmarkRequest') ResetJobBookmarkResponse = Shapes::StructureShape.new(name: 'ResetJobBookmarkResponse') @@ -682,9 +780,18 @@ module ClientApi RunId = Shapes::StringShape.new(name: 'RunId') RunStatementRequest = Shapes::StructureShape.new(name: 'RunStatementRequest') RunStatementResponse = Shapes::StructureShape.new(name: 'RunStatementResponse') + S3CatalogSource = Shapes::StructureShape.new(name: 'S3CatalogSource') + S3CatalogTarget = Shapes::StructureShape.new(name: 'S3CatalogTarget') + S3CsvSource = Shapes::StructureShape.new(name: 'S3CsvSource') + S3DirectSourceAdditionalOptions = Shapes::StructureShape.new(name: 'S3DirectSourceAdditionalOptions') + S3DirectTarget = Shapes::StructureShape.new(name: 'S3DirectTarget') S3Encryption = Shapes::StructureShape.new(name: 'S3Encryption') S3EncryptionList = Shapes::ListShape.new(name: 'S3EncryptionList') S3EncryptionMode = Shapes::StringShape.new(name: 'S3EncryptionMode') + S3GlueParquetTarget = Shapes::StructureShape.new(name: 'S3GlueParquetTarget') + S3JsonSource = Shapes::StructureShape.new(name: 'S3JsonSource') + S3ParquetSource = Shapes::StructureShape.new(name: 'S3ParquetSource') + S3SourceAdditionalOptions = Shapes::StructureShape.new(name: 'S3SourceAdditionalOptions') S3Target = Shapes::StructureShape.new(name: 'S3Target') S3TargetList = Shapes::ListShape.new(name: 'S3TargetList') ScalaCode = Shapes::StringShape.new(name: 'ScalaCode') @@ -723,6 +830,9 @@ module ClientApi SecurityConfigurationList = Shapes::ListShape.new(name: 'SecurityConfigurationList') SecurityGroupIdList = Shapes::ListShape.new(name: 'SecurityGroupIdList') Segment = Shapes::StructureShape.new(name: 'Segment') + SelectFields = Shapes::StructureShape.new(name: 'SelectFields') + SelectFromCollection = Shapes::StructureShape.new(name: 'SelectFromCollection') + Separator = Shapes::StringShape.new(name: 'Separator') SerDeInfo = Shapes::StructureShape.new(name: 'SerDeInfo') Session = Shapes::StructureShape.new(name: 'Session') SessionCommand = Shapes::StructureShape.new(name: 'SessionCommand') @@ -734,6 +844,14 @@ module ClientApi SortCriteria = Shapes::ListShape.new(name: 'SortCriteria') SortCriterion = Shapes::StructureShape.new(name: 'SortCriterion') SortDirectionType = Shapes::StringShape.new(name: 'SortDirectionType') + SparkConnectorSource = Shapes::StructureShape.new(name: 'SparkConnectorSource') + SparkConnectorTarget = Shapes::StructureShape.new(name: 'SparkConnectorTarget') + SparkSQL = Shapes::StructureShape.new(name: 'SparkSQL') + Spigot = Shapes::StructureShape.new(name: 'Spigot') + SplitFields = Shapes::StructureShape.new(name: 'SplitFields') + SqlAlias = Shapes::StructureShape.new(name: 'SqlAlias') + SqlAliases = Shapes::ListShape.new(name: 'SqlAliases') + SqlQuery = Shapes::StringShape.new(name: 'SqlQuery') StartBlueprintRunRequest = Shapes::StructureShape.new(name: 'StartBlueprintRunRequest') StartBlueprintRunResponse = Shapes::StructureShape.new(name: 'StartBlueprintRunResponse') StartCrawlerRequest = Shapes::StructureShape.new(name: 'StartCrawlerRequest') @@ -755,6 +873,7 @@ module ClientApi StartWorkflowRunRequest = Shapes::StructureShape.new(name: 'StartWorkflowRunRequest') StartWorkflowRunResponse = Shapes::StructureShape.new(name: 'StartWorkflowRunResponse') StartingEventBatchCondition 
= Shapes::StructureShape.new(name: 'StartingEventBatchCondition') + StartingPosition = Shapes::StringShape.new(name: 'StartingPosition') Statement = Shapes::StructureShape.new(name: 'Statement') StatementList = Shapes::ListShape.new(name: 'StatementList') StatementOutput = Shapes::StructureShape.new(name: 'StatementOutput') @@ -771,6 +890,7 @@ module ClientApi StopWorkflowRunRequest = Shapes::StructureShape.new(name: 'StopWorkflowRunRequest') StopWorkflowRunResponse = Shapes::StructureShape.new(name: 'StopWorkflowRunResponse') StorageDescriptor = Shapes::StructureShape.new(name: 'StorageDescriptor') + StreamingDataPreviewOptions = Shapes::StructureShape.new(name: 'StreamingDataPreviewOptions') StringColumnStatisticsData = Shapes::StructureShape.new(name: 'StringColumnStatisticsData') StringList = Shapes::ListShape.new(name: 'StringList') Table = Shapes::StructureShape.new(name: 'Table') @@ -791,6 +911,7 @@ module ClientApi TagResourceResponse = Shapes::StructureShape.new(name: 'TagResourceResponse') TagValue = Shapes::StringShape.new(name: 'TagValue') TagsMap = Shapes::MapShape.new(name: 'TagsMap') + TargetFormat = Shapes::StringShape.new(name: 'TargetFormat') TaskRun = Shapes::StructureShape.new(name: 'TaskRun') TaskRunFilterCriteria = Shapes::StructureShape.new(name: 'TaskRunFilterCriteria') TaskRunList = Shapes::ListShape.new(name: 'TaskRunList') @@ -803,6 +924,7 @@ module ClientApi Timestamp = Shapes::TimestampShape.new(name: 'Timestamp') TimestampValue = Shapes::TimestampShape.new(name: 'TimestampValue') Token = Shapes::StringShape.new(name: 'Token') + Topk = Shapes::IntegerShape.new(name: 'Topk') TotalSegmentsInteger = Shapes::IntegerShape.new(name: 'TotalSegmentsInteger') TransactionIdString = Shapes::StringShape.new(name: 'TransactionIdString') TransformEncryption = Shapes::StructureShape.new(name: 'TransformEncryption') @@ -822,15 +944,19 @@ module ClientApi TriggerState = Shapes::StringShape.new(name: 'TriggerState') TriggerType = Shapes::StringShape.new(name: 'TriggerType') TriggerUpdate = Shapes::StructureShape.new(name: 'TriggerUpdate') + TwoInputs = Shapes::ListShape.new(name: 'TwoInputs') TypeString = Shapes::StringShape.new(name: 'TypeString') URI = Shapes::StringShape.new(name: 'URI') UnfilteredPartition = Shapes::StructureShape.new(name: 'UnfilteredPartition') UnfilteredPartitionList = Shapes::ListShape.new(name: 'UnfilteredPartitionList') + Union = Shapes::StructureShape.new(name: 'Union') + UnionType = Shapes::StringShape.new(name: 'UnionType') UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest') UntagResourceResponse = Shapes::StructureShape.new(name: 'UntagResourceResponse') UpdateBehavior = Shapes::StringShape.new(name: 'UpdateBehavior') UpdateBlueprintRequest = Shapes::StructureShape.new(name: 'UpdateBlueprintRequest') UpdateBlueprintResponse = Shapes::StructureShape.new(name: 'UpdateBlueprintResponse') + UpdateCatalogBehavior = Shapes::StringShape.new(name: 'UpdateCatalogBehavior') UpdateClassifierRequest = Shapes::StructureShape.new(name: 'UpdateClassifierRequest') UpdateClassifierResponse = Shapes::StructureShape.new(name: 'UpdateClassifierResponse') UpdateColumnStatisticsForPartitionRequest = Shapes::StructureShape.new(name: 'UpdateColumnStatisticsForPartitionRequest') @@ -871,6 +997,7 @@ module ClientApi UpdateWorkflowResponse = Shapes::StructureShape.new(name: 'UpdateWorkflowResponse') UpdateXMLClassifierRequest = Shapes::StructureShape.new(name: 'UpdateXMLClassifierRequest') UpdatedTimestamp = Shapes::StringShape.new(name: 
'UpdatedTimestamp') + UpsertRedshiftTargetOptions = Shapes::StructureShape.new(name: 'UpsertRedshiftTargetOptions') UriString = Shapes::StringShape.new(name: 'UriString') UserDefinedFunction = Shapes::StructureShape.new(name: 'UserDefinedFunction') UserDefinedFunctionInput = Shapes::StructureShape.new(name: 'UserDefinedFunctionInput') @@ -909,12 +1036,41 @@ module ClientApi ActionList.member = Shapes::ShapeRef.new(shape: Action) + AdditionalOptions.key = Shapes::ShapeRef.new(shape: EnclosedInStringProperty) + AdditionalOptions.value = Shapes::ShapeRef.new(shape: EnclosedInStringProperty) + AdditionalPlanOptionsMap.key = Shapes::ShapeRef.new(shape: GenericString) AdditionalPlanOptionsMap.value = Shapes::ShapeRef.new(shape: GenericString) + Aggregate.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + Aggregate.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + Aggregate.add_member(:groups, Shapes::ShapeRef.new(shape: GlueStudioPathList, required: true, location_name: "Groups")) + Aggregate.add_member(:aggs, Shapes::ShapeRef.new(shape: AggregateOperations, required: true, location_name: "Aggs")) + Aggregate.struct_class = Types::Aggregate + + AggregateOperation.add_member(:column, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "Column")) + AggregateOperation.add_member(:agg_func, Shapes::ShapeRef.new(shape: AggFunction, required: true, location_name: "AggFunc")) + AggregateOperation.struct_class = Types::AggregateOperation + + AggregateOperations.member = Shapes::ShapeRef.new(shape: AggregateOperation) + AlreadyExistsException.add_member(:message, Shapes::ShapeRef.new(shape: MessageString, location_name: "Message")) AlreadyExistsException.struct_class = Types::AlreadyExistsException + ApplyMapping.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + ApplyMapping.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + ApplyMapping.add_member(:mapping, Shapes::ShapeRef.new(shape: Mappings, required: true, location_name: "Mapping")) + ApplyMapping.struct_class = Types::ApplyMapping + + AthenaConnectorSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + AthenaConnectorSource.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionName")) + AthenaConnectorSource.add_member(:connector_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectorName")) + AthenaConnectorSource.add_member(:connection_type, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionType")) + AthenaConnectorSource.add_member(:connection_table, Shapes::ShapeRef.new(shape: EnclosedInStringPropertyWithQuote, location_name: "ConnectionTable")) + AthenaConnectorSource.add_member(:schema_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "SchemaName")) + AthenaConnectorSource.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + AthenaConnectorSource.struct_class = Types::AthenaConnectorSource + AuditColumnNamesList.member = Shapes::ShapeRef.new(shape: ColumnNameString) AuditContext.add_member(:additional_audit_context, Shapes::ShapeRef.new(shape: AuditContextString, location_name: "AdditionalAuditContext")) @@ -930,6 
+1086,12 @@ module ClientApi BackfillErrors.member = Shapes::ShapeRef.new(shape: BackfillError) + BasicCatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + BasicCatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + BasicCatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + BasicCatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + BasicCatalogTarget.struct_class = Types::BasicCatalogTarget + BatchCreatePartitionRequest.add_member(:catalog_id, Shapes::ShapeRef.new(shape: CatalogIdString, location_name: "CatalogId")) BatchCreatePartitionRequest.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "DatabaseName")) BatchCreatePartitionRequest.add_member(:table_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "TableName")) @@ -1163,6 +1325,33 @@ module ClientApi CatalogImportStatus.add_member(:imported_by, Shapes::ShapeRef.new(shape: NameString, location_name: "ImportedBy")) CatalogImportStatus.struct_class = Types::CatalogImportStatus + CatalogKafkaSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + CatalogKafkaSource.add_member(:window_size, Shapes::ShapeRef.new(shape: BoxedPositiveInt, location_name: "WindowSize", metadata: {"box"=>true})) + CatalogKafkaSource.add_member(:detect_schema, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "DetectSchema", metadata: {"box"=>true})) + CatalogKafkaSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + CatalogKafkaSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + CatalogKafkaSource.add_member(:streaming_options, Shapes::ShapeRef.new(shape: KafkaStreamingSourceOptions, location_name: "StreamingOptions")) + CatalogKafkaSource.add_member(:data_preview_options, Shapes::ShapeRef.new(shape: StreamingDataPreviewOptions, location_name: "DataPreviewOptions")) + CatalogKafkaSource.struct_class = Types::CatalogKafkaSource + + CatalogKinesisSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + CatalogKinesisSource.add_member(:window_size, Shapes::ShapeRef.new(shape: BoxedPositiveInt, location_name: "WindowSize")) + CatalogKinesisSource.add_member(:detect_schema, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "DetectSchema", metadata: {"box"=>true})) + CatalogKinesisSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + CatalogKinesisSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + CatalogKinesisSource.add_member(:streaming_options, Shapes::ShapeRef.new(shape: KinesisStreamingSourceOptions, location_name: "StreamingOptions")) + CatalogKinesisSource.add_member(:data_preview_options, Shapes::ShapeRef.new(shape: StreamingDataPreviewOptions, location_name: "DataPreviewOptions")) + CatalogKinesisSource.struct_class = Types::CatalogKinesisSource + + CatalogSchemaChangePolicy.add_member(:enable_update_catalog, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "EnableUpdateCatalog")) + 
CatalogSchemaChangePolicy.add_member(:update_behavior, Shapes::ShapeRef.new(shape: UpdateCatalogBehavior, location_name: "UpdateBehavior")) + CatalogSchemaChangePolicy.struct_class = Types::CatalogSchemaChangePolicy + + CatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + CatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + CatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + CatalogSource.struct_class = Types::CatalogSource + CatalogTablesList.member = Shapes::ShapeRef.new(shape: NameString) CatalogTarget.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "DatabaseName")) @@ -1194,6 +1383,61 @@ module ClientApi CloudWatchEncryption.add_member(:kms_key_arn, Shapes::ShapeRef.new(shape: KmsKeyArn, location_name: "KmsKeyArn")) CloudWatchEncryption.struct_class = Types::CloudWatchEncryption + CodeGenConfigurationNode.add_member(:athena_connector_source, Shapes::ShapeRef.new(shape: AthenaConnectorSource, location_name: "AthenaConnectorSource")) + CodeGenConfigurationNode.add_member(:jdbc_connector_source, Shapes::ShapeRef.new(shape: JDBCConnectorSource, location_name: "JDBCConnectorSource")) + CodeGenConfigurationNode.add_member(:spark_connector_source, Shapes::ShapeRef.new(shape: SparkConnectorSource, location_name: "SparkConnectorSource")) + CodeGenConfigurationNode.add_member(:catalog_source, Shapes::ShapeRef.new(shape: CatalogSource, location_name: "CatalogSource")) + CodeGenConfigurationNode.add_member(:redshift_source, Shapes::ShapeRef.new(shape: RedshiftSource, location_name: "RedshiftSource")) + CodeGenConfigurationNode.add_member(:s3_catalog_source, Shapes::ShapeRef.new(shape: S3CatalogSource, location_name: "S3CatalogSource")) + CodeGenConfigurationNode.add_member(:s3_csv_source, Shapes::ShapeRef.new(shape: S3CsvSource, location_name: "S3CsvSource")) + CodeGenConfigurationNode.add_member(:s3_json_source, Shapes::ShapeRef.new(shape: S3JsonSource, location_name: "S3JsonSource")) + CodeGenConfigurationNode.add_member(:s3_parquet_source, Shapes::ShapeRef.new(shape: S3ParquetSource, location_name: "S3ParquetSource")) + CodeGenConfigurationNode.add_member(:relational_catalog_source, Shapes::ShapeRef.new(shape: RelationalCatalogSource, location_name: "RelationalCatalogSource")) + CodeGenConfigurationNode.add_member(:dynamo_db_catalog_source, Shapes::ShapeRef.new(shape: DynamoDBCatalogSource, location_name: "DynamoDBCatalogSource")) + CodeGenConfigurationNode.add_member(:jdbc_connector_target, Shapes::ShapeRef.new(shape: JDBCConnectorTarget, location_name: "JDBCConnectorTarget")) + CodeGenConfigurationNode.add_member(:spark_connector_target, Shapes::ShapeRef.new(shape: SparkConnectorTarget, location_name: "SparkConnectorTarget")) + CodeGenConfigurationNode.add_member(:catalog_target, Shapes::ShapeRef.new(shape: BasicCatalogTarget, location_name: "CatalogTarget")) + CodeGenConfigurationNode.add_member(:redshift_target, Shapes::ShapeRef.new(shape: RedshiftTarget, location_name: "RedshiftTarget")) + CodeGenConfigurationNode.add_member(:s3_catalog_target, Shapes::ShapeRef.new(shape: S3CatalogTarget, location_name: "S3CatalogTarget")) + CodeGenConfigurationNode.add_member(:s3_glue_parquet_target, Shapes::ShapeRef.new(shape: S3GlueParquetTarget, location_name: "S3GlueParquetTarget")) + 
CodeGenConfigurationNode.add_member(:s3_direct_target, Shapes::ShapeRef.new(shape: S3DirectTarget, location_name: "S3DirectTarget")) + CodeGenConfigurationNode.add_member(:apply_mapping, Shapes::ShapeRef.new(shape: ApplyMapping, location_name: "ApplyMapping")) + CodeGenConfigurationNode.add_member(:select_fields, Shapes::ShapeRef.new(shape: SelectFields, location_name: "SelectFields")) + CodeGenConfigurationNode.add_member(:drop_fields, Shapes::ShapeRef.new(shape: DropFields, location_name: "DropFields")) + CodeGenConfigurationNode.add_member(:rename_field, Shapes::ShapeRef.new(shape: RenameField, location_name: "RenameField")) + CodeGenConfigurationNode.add_member(:spigot, Shapes::ShapeRef.new(shape: Spigot, location_name: "Spigot")) + CodeGenConfigurationNode.add_member(:join, Shapes::ShapeRef.new(shape: Join, location_name: "Join")) + CodeGenConfigurationNode.add_member(:split_fields, Shapes::ShapeRef.new(shape: SplitFields, location_name: "SplitFields")) + CodeGenConfigurationNode.add_member(:select_from_collection, Shapes::ShapeRef.new(shape: SelectFromCollection, location_name: "SelectFromCollection")) + CodeGenConfigurationNode.add_member(:fill_missing_values, Shapes::ShapeRef.new(shape: FillMissingValues, location_name: "FillMissingValues")) + CodeGenConfigurationNode.add_member(:filter, Shapes::ShapeRef.new(shape: Filter, location_name: "Filter")) + CodeGenConfigurationNode.add_member(:custom_code, Shapes::ShapeRef.new(shape: CustomCode, location_name: "CustomCode")) + CodeGenConfigurationNode.add_member(:spark_sql, Shapes::ShapeRef.new(shape: SparkSQL, location_name: "SparkSQL")) + CodeGenConfigurationNode.add_member(:direct_kinesis_source, Shapes::ShapeRef.new(shape: DirectKinesisSource, location_name: "DirectKinesisSource")) + CodeGenConfigurationNode.add_member(:direct_kafka_source, Shapes::ShapeRef.new(shape: DirectKafkaSource, location_name: "DirectKafkaSource")) + CodeGenConfigurationNode.add_member(:catalog_kinesis_source, Shapes::ShapeRef.new(shape: CatalogKinesisSource, location_name: "CatalogKinesisSource")) + CodeGenConfigurationNode.add_member(:catalog_kafka_source, Shapes::ShapeRef.new(shape: CatalogKafkaSource, location_name: "CatalogKafkaSource")) + CodeGenConfigurationNode.add_member(:drop_null_fields, Shapes::ShapeRef.new(shape: DropNullFields, location_name: "DropNullFields")) + CodeGenConfigurationNode.add_member(:merge, Shapes::ShapeRef.new(shape: Merge, location_name: "Merge")) + CodeGenConfigurationNode.add_member(:union, Shapes::ShapeRef.new(shape: Union, location_name: "Union")) + CodeGenConfigurationNode.add_member(:pii_detection, Shapes::ShapeRef.new(shape: PIIDetection, location_name: "PIIDetection")) + CodeGenConfigurationNode.add_member(:aggregate, Shapes::ShapeRef.new(shape: Aggregate, location_name: "Aggregate")) + CodeGenConfigurationNode.add_member(:drop_duplicates, Shapes::ShapeRef.new(shape: DropDuplicates, location_name: "DropDuplicates")) + CodeGenConfigurationNode.add_member(:governed_catalog_target, Shapes::ShapeRef.new(shape: GovernedCatalogTarget, location_name: "GovernedCatalogTarget")) + CodeGenConfigurationNode.add_member(:governed_catalog_source, Shapes::ShapeRef.new(shape: GovernedCatalogSource, location_name: "GovernedCatalogSource")) + CodeGenConfigurationNode.add_member(:microsoft_sql_server_catalog_source, Shapes::ShapeRef.new(shape: MicrosoftSQLServerCatalogSource, location_name: "MicrosoftSQLServerCatalogSource")) + CodeGenConfigurationNode.add_member(:my_sql_catalog_source, Shapes::ShapeRef.new(shape: MySQLCatalogSource, 
location_name: "MySQLCatalogSource")) + CodeGenConfigurationNode.add_member(:oracle_sql_catalog_source, Shapes::ShapeRef.new(shape: OracleSQLCatalogSource, location_name: "OracleSQLCatalogSource")) + CodeGenConfigurationNode.add_member(:postgre_sql_catalog_source, Shapes::ShapeRef.new(shape: PostgreSQLCatalogSource, location_name: "PostgreSQLCatalogSource")) + CodeGenConfigurationNode.add_member(:microsoft_sql_server_catalog_target, Shapes::ShapeRef.new(shape: MicrosoftSQLServerCatalogTarget, location_name: "MicrosoftSQLServerCatalogTarget")) + CodeGenConfigurationNode.add_member(:my_sql_catalog_target, Shapes::ShapeRef.new(shape: MySQLCatalogTarget, location_name: "MySQLCatalogTarget")) + CodeGenConfigurationNode.add_member(:oracle_sql_catalog_target, Shapes::ShapeRef.new(shape: OracleSQLCatalogTarget, location_name: "OracleSQLCatalogTarget")) + CodeGenConfigurationNode.add_member(:postgre_sql_catalog_target, Shapes::ShapeRef.new(shape: PostgreSQLCatalogTarget, location_name: "PostgreSQLCatalogTarget")) + CodeGenConfigurationNode.struct_class = Types::CodeGenConfigurationNode + + CodeGenConfigurationNodes.key = Shapes::ShapeRef.new(shape: NodeId) + CodeGenConfigurationNodes.value = Shapes::ShapeRef.new(shape: CodeGenConfigurationNode) + CodeGenEdge.add_member(:source, Shapes::ShapeRef.new(shape: CodeGenIdentifier, required: true, location_name: "Source")) CodeGenEdge.add_member(:target, Shapes::ShapeRef.new(shape: CodeGenIdentifier, required: true, location_name: "Target")) CodeGenEdge.add_member(:target_parameter, Shapes::ShapeRef.new(shape: CodeGenArgName, location_name: "TargetParameter")) @@ -1521,6 +1765,7 @@ module ClientApi CreateJobRequest.add_member(:glue_version, Shapes::ShapeRef.new(shape: GlueVersionString, location_name: "GlueVersion")) CreateJobRequest.add_member(:number_of_workers, Shapes::ShapeRef.new(shape: NullableInteger, location_name: "NumberOfWorkers")) CreateJobRequest.add_member(:worker_type, Shapes::ShapeRef.new(shape: WorkerType, location_name: "WorkerType")) + CreateJobRequest.add_member(:code_gen_configuration_nodes, Shapes::ShapeRef.new(shape: CodeGenConfigurationNodes, location_name: "CodeGenConfigurationNodes")) CreateJobRequest.struct_class = Types::CreateJobRequest CreateJobResponse.add_member(:name, Shapes::ShapeRef.new(shape: NameString, location_name: "Name")) @@ -1697,6 +1942,13 @@ module ClientApi CsvHeader.member = Shapes::ShapeRef.new(shape: NameString) + CustomCode.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + CustomCode.add_member(:inputs, Shapes::ShapeRef.new(shape: ManyInputs, required: true, location_name: "Inputs")) + CustomCode.add_member(:code, Shapes::ShapeRef.new(shape: ExtendedString, required: true, location_name: "Code")) + CustomCode.add_member(:class_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ClassName")) + CustomCode.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + CustomCode.struct_class = Types::CustomCode + CustomEntityType.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name")) CustomEntityType.add_member(:regex_string, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "RegexString")) CustomEntityType.add_member(:context_words, Shapes::ShapeRef.new(shape: ContextWords, location_name: "ContextWords")) @@ -1741,6 +1993,10 @@ module ClientApi DatabaseList.member = Shapes::ShapeRef.new(shape: Database) + 
Datatype.add_member(:id, Shapes::ShapeRef.new(shape: GenericLimitedString, required: true, location_name: "Id")) + Datatype.add_member(:label, Shapes::ShapeRef.new(shape: GenericLimitedString, required: true, location_name: "Label")) + Datatype.struct_class = Types::Datatype + DateColumnStatisticsData.add_member(:minimum_value, Shapes::ShapeRef.new(shape: Timestamp, location_name: "MinimumValue")) DateColumnStatisticsData.add_member(:maximum_value, Shapes::ShapeRef.new(shape: Timestamp, location_name: "MaximumValue")) DateColumnStatisticsData.add_member(:number_of_nulls, Shapes::ShapeRef.new(shape: NonNegativeLong, required: true, location_name: "NumberOfNulls")) @@ -1963,12 +2219,53 @@ module ClientApi DevEndpointNames.member = Shapes::ShapeRef.new(shape: GenericString) + DirectKafkaSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + DirectKafkaSource.add_member(:streaming_options, Shapes::ShapeRef.new(shape: KafkaStreamingSourceOptions, location_name: "StreamingOptions")) + DirectKafkaSource.add_member(:window_size, Shapes::ShapeRef.new(shape: BoxedPositiveInt, location_name: "WindowSize", metadata: {"box"=>true})) + DirectKafkaSource.add_member(:detect_schema, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "DetectSchema", metadata: {"box"=>true})) + DirectKafkaSource.add_member(:data_preview_options, Shapes::ShapeRef.new(shape: StreamingDataPreviewOptions, location_name: "DataPreviewOptions")) + DirectKafkaSource.struct_class = Types::DirectKafkaSource + + DirectKinesisSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + DirectKinesisSource.add_member(:window_size, Shapes::ShapeRef.new(shape: BoxedPositiveInt, location_name: "WindowSize", metadata: {"box"=>true})) + DirectKinesisSource.add_member(:detect_schema, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "DetectSchema", metadata: {"box"=>true})) + DirectKinesisSource.add_member(:streaming_options, Shapes::ShapeRef.new(shape: KinesisStreamingSourceOptions, location_name: "StreamingOptions")) + DirectKinesisSource.add_member(:data_preview_options, Shapes::ShapeRef.new(shape: StreamingDataPreviewOptions, location_name: "DataPreviewOptions")) + DirectKinesisSource.struct_class = Types::DirectKinesisSource + + DirectSchemaChangePolicy.add_member(:enable_update_catalog, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "EnableUpdateCatalog")) + DirectSchemaChangePolicy.add_member(:update_behavior, Shapes::ShapeRef.new(shape: UpdateCatalogBehavior, location_name: "UpdateBehavior")) + DirectSchemaChangePolicy.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Table")) + DirectSchemaChangePolicy.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Database")) + DirectSchemaChangePolicy.struct_class = Types::DirectSchemaChangePolicy + DoubleColumnStatisticsData.add_member(:minimum_value, Shapes::ShapeRef.new(shape: Double, location_name: "MinimumValue")) DoubleColumnStatisticsData.add_member(:maximum_value, Shapes::ShapeRef.new(shape: Double, location_name: "MaximumValue")) DoubleColumnStatisticsData.add_member(:number_of_nulls, Shapes::ShapeRef.new(shape: NonNegativeLong, required: true, location_name: "NumberOfNulls")) DoubleColumnStatisticsData.add_member(:number_of_distinct_values, Shapes::ShapeRef.new(shape: NonNegativeLong, required: true, location_name: "NumberOfDistinctValues")) DoubleColumnStatisticsData.struct_class = 
Types::DoubleColumnStatisticsData + DropDuplicates.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + DropDuplicates.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + DropDuplicates.add_member(:columns, Shapes::ShapeRef.new(shape: LimitedPathList, location_name: "Columns")) + DropDuplicates.struct_class = Types::DropDuplicates + + DropFields.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + DropFields.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + DropFields.add_member(:paths, Shapes::ShapeRef.new(shape: GlueStudioPathList, required: true, location_name: "Paths")) + DropFields.struct_class = Types::DropFields + + DropNullFields.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + DropNullFields.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + DropNullFields.add_member(:null_check_box_list, Shapes::ShapeRef.new(shape: NullCheckBoxList, location_name: "NullCheckBoxList")) + DropNullFields.add_member(:null_text_list, Shapes::ShapeRef.new(shape: NullValueFields, location_name: "NullTextList")) + DropNullFields.struct_class = Types::DropNullFields + + DynamoDBCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + DynamoDBCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + DynamoDBCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + DynamoDBCatalogSource.struct_class = Types::DynamoDBCatalogSource + DynamoDBTarget.add_member(:path, Shapes::ShapeRef.new(shape: Path, location_name: "Path")) DynamoDBTarget.add_member(:scan_all, Shapes::ShapeRef.new(shape: NullableBoolean, location_name: "scanAll")) DynamoDBTarget.add_member(:scan_rate, Shapes::ShapeRef.new(shape: NullableDouble, location_name: "scanRate")) @@ -1982,6 +2279,10 @@ module ClientApi EdgeList.member = Shapes::ShapeRef.new(shape: Edge) + EnclosedInStringProperties.member = Shapes::ShapeRef.new(shape: EnclosedInStringProperty) + + EnclosedInStringPropertiesMinOne.member = Shapes::ShapeRef.new(shape: EnclosedInStringProperty) + EncryptionAtRest.add_member(:catalog_encryption_mode, Shapes::ShapeRef.new(shape: CatalogEncryptionMode, required: true, location_name: "CatalogEncryptionMode")) EncryptionAtRest.add_member(:sse_aws_kms_key_id, Shapes::ShapeRef.new(shape: NameString, location_name: "SseAwsKmsKeyId")) EncryptionAtRest.struct_class = Types::EncryptionAtRest @@ -2019,6 +2320,31 @@ module ClientApi ExportLabelsTaskRunProperties.add_member(:output_s3_path, Shapes::ShapeRef.new(shape: UriString, location_name: "OutputS3Path")) ExportLabelsTaskRunProperties.struct_class = Types::ExportLabelsTaskRunProperties + FillMissingValues.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + FillMissingValues.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + FillMissingValues.add_member(:imputed_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ImputedPath")) + FillMissingValues.add_member(:filled_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "FilledPath")) + 
FillMissingValues.struct_class = Types::FillMissingValues + + Filter.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + Filter.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + Filter.add_member(:logical_operator, Shapes::ShapeRef.new(shape: FilterLogicalOperator, required: true, location_name: "LogicalOperator")) + Filter.add_member(:filters, Shapes::ShapeRef.new(shape: FilterExpressions, required: true, location_name: "Filters")) + Filter.struct_class = Types::Filter + + FilterExpression.add_member(:operation, Shapes::ShapeRef.new(shape: FilterOperation, required: true, location_name: "Operation")) + FilterExpression.add_member(:negated, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Negated")) + FilterExpression.add_member(:values, Shapes::ShapeRef.new(shape: FilterValues, required: true, location_name: "Values")) + FilterExpression.struct_class = Types::FilterExpression + + FilterExpressions.member = Shapes::ShapeRef.new(shape: FilterExpression) + + FilterValue.add_member(:type, Shapes::ShapeRef.new(shape: FilterValueType, required: true, location_name: "Type")) + FilterValue.add_member(:value, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "Value")) + FilterValue.struct_class = Types::FilterValue + + FilterValues.member = Shapes::ShapeRef.new(shape: FilterValue) + FindMatchesMetrics.add_member(:area_under_pr_curve, Shapes::ShapeRef.new(shape: GenericBoundedDouble, location_name: "AreaUnderPRCurve")) FindMatchesMetrics.add_member(:precision, Shapes::ShapeRef.new(shape: GenericBoundedDouble, location_name: "Precision")) FindMatchesMetrics.add_member(:recall, Shapes::ShapeRef.new(shape: GenericBoundedDouble, location_name: "Recall")) @@ -2638,6 +2964,19 @@ module ClientApi GluePolicy.add_member(:update_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "UpdateTime")) GluePolicy.struct_class = Types::GluePolicy + GlueSchema.add_member(:columns, Shapes::ShapeRef.new(shape: GlueStudioSchemaColumnList, location_name: "Columns")) + GlueSchema.struct_class = Types::GlueSchema + + GlueSchemas.member = Shapes::ShapeRef.new(shape: GlueSchema) + + GlueStudioPathList.member = Shapes::ShapeRef.new(shape: EnclosedInStringProperties) + + GlueStudioSchemaColumn.add_member(:name, Shapes::ShapeRef.new(shape: GlueStudioColumnNameString, required: true, location_name: "Name")) + GlueStudioSchemaColumn.add_member(:type, Shapes::ShapeRef.new(shape: ColumnTypeString, location_name: "Type")) + GlueStudioSchemaColumn.struct_class = Types::GlueStudioSchemaColumn + + GlueStudioSchemaColumnList.member = Shapes::ShapeRef.new(shape: GlueStudioSchemaColumn) + GlueTable.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "DatabaseName")) GlueTable.add_member(:table_name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "TableName")) GlueTable.add_member(:catalog_id, Shapes::ShapeRef.new(shape: NameString, location_name: "CatalogId")) @@ -2646,6 +2985,21 @@ module ClientApi GlueTables.member = Shapes::ShapeRef.new(shape: GlueTable) + GovernedCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + GovernedCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + GovernedCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: 
true, location_name: "Table")) + GovernedCatalogSource.add_member(:partition_predicate, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "PartitionPredicate")) + GovernedCatalogSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: S3SourceAdditionalOptions, location_name: "AdditionalOptions")) + GovernedCatalogSource.struct_class = Types::GovernedCatalogSource + + GovernedCatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + GovernedCatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + GovernedCatalogTarget.add_member(:partition_keys, Shapes::ShapeRef.new(shape: GlueStudioPathList, location_name: "PartitionKeys")) + GovernedCatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + GovernedCatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + GovernedCatalogTarget.add_member(:schema_change_policy, Shapes::ShapeRef.new(shape: CatalogSchemaChangePolicy, location_name: "SchemaChangePolicy")) + GovernedCatalogTarget.struct_class = Types::GovernedCatalogTarget + GrokClassifier.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name")) GrokClassifier.add_member(:classification, Shapes::ShapeRef.new(shape: Classification, required: true, location_name: "Classification")) GrokClassifier.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreationTime")) @@ -2685,6 +3039,39 @@ module ClientApi InvalidStateException.add_member(:message, Shapes::ShapeRef.new(shape: MessageString, location_name: "Message")) InvalidStateException.struct_class = Types::InvalidStateException + JDBCConnectorOptions.add_member(:filter_predicate, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "FilterPredicate")) + JDBCConnectorOptions.add_member(:partition_column, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "PartitionColumn")) + JDBCConnectorOptions.add_member(:lower_bound, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "LowerBound")) + JDBCConnectorOptions.add_member(:upper_bound, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "UpperBound")) + JDBCConnectorOptions.add_member(:num_partitions, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "NumPartitions")) + JDBCConnectorOptions.add_member(:job_bookmark_keys, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, location_name: "JobBookmarkKeys")) + JDBCConnectorOptions.add_member(:job_bookmark_keys_sort_order, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "JobBookmarkKeysSortOrder")) + JDBCConnectorOptions.add_member(:data_type_mapping, Shapes::ShapeRef.new(shape: JDBCDataTypeMapping, location_name: "DataTypeMapping")) + JDBCConnectorOptions.struct_class = Types::JDBCConnectorOptions + + JDBCConnectorSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + JDBCConnectorSource.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionName")) + JDBCConnectorSource.add_member(:connector_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectorName")) + JDBCConnectorSource.add_member(:connection_type, Shapes::ShapeRef.new(shape: 
EnclosedInStringProperty, required: true, location_name: "ConnectionType")) + JDBCConnectorSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: JDBCConnectorOptions, location_name: "AdditionalOptions")) + JDBCConnectorSource.add_member(:connection_table, Shapes::ShapeRef.new(shape: EnclosedInStringPropertyWithQuote, location_name: "ConnectionTable")) + JDBCConnectorSource.add_member(:query, Shapes::ShapeRef.new(shape: SqlQuery, location_name: "Query")) + JDBCConnectorSource.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + JDBCConnectorSource.struct_class = Types::JDBCConnectorSource + + JDBCConnectorTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + JDBCConnectorTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + JDBCConnectorTarget.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionName")) + JDBCConnectorTarget.add_member(:connection_table, Shapes::ShapeRef.new(shape: EnclosedInStringPropertyWithQuote, required: true, location_name: "ConnectionTable")) + JDBCConnectorTarget.add_member(:connector_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectorName")) + JDBCConnectorTarget.add_member(:connection_type, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionType")) + JDBCConnectorTarget.add_member(:additional_options, Shapes::ShapeRef.new(shape: AdditionalOptions, location_name: "AdditionalOptions")) + JDBCConnectorTarget.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + JDBCConnectorTarget.struct_class = Types::JDBCConnectorTarget + + JDBCDataTypeMapping.key = Shapes::ShapeRef.new(shape: JDBCDataType) + JDBCDataTypeMapping.value = Shapes::ShapeRef.new(shape: GlueRecordType) + JdbcTarget.add_member(:connection_name, Shapes::ShapeRef.new(shape: ConnectionName, location_name: "ConnectionName")) JdbcTarget.add_member(:path, Shapes::ShapeRef.new(shape: Path, location_name: "Path")) JdbcTarget.add_member(:exclusions, Shapes::ShapeRef.new(shape: PathList, location_name: "Exclusions")) @@ -2712,6 +3099,7 @@ module ClientApi Job.add_member(:security_configuration, Shapes::ShapeRef.new(shape: NameString, location_name: "SecurityConfiguration")) Job.add_member(:notification_property, Shapes::ShapeRef.new(shape: NotificationProperty, location_name: "NotificationProperty")) Job.add_member(:glue_version, Shapes::ShapeRef.new(shape: GlueVersionString, location_name: "GlueVersion")) + Job.add_member(:code_gen_configuration_nodes, Shapes::ShapeRef.new(shape: CodeGenConfigurationNodes, location_name: "CodeGenConfigurationNodes")) Job.struct_class = Types::Job JobBookmarkEntry.add_member(:job_name, Shapes::ShapeRef.new(shape: JobName, location_name: "JobName")) @@ -2783,8 +3171,21 @@ module ClientApi JobUpdate.add_member(:security_configuration, Shapes::ShapeRef.new(shape: NameString, location_name: "SecurityConfiguration")) JobUpdate.add_member(:notification_property, Shapes::ShapeRef.new(shape: NotificationProperty, location_name: "NotificationProperty")) JobUpdate.add_member(:glue_version, Shapes::ShapeRef.new(shape: GlueVersionString, location_name: "GlueVersion")) + JobUpdate.add_member(:code_gen_configuration_nodes, Shapes::ShapeRef.new(shape: CodeGenConfigurationNodes, location_name: 
"CodeGenConfigurationNodes")) JobUpdate.struct_class = Types::JobUpdate + Join.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + Join.add_member(:inputs, Shapes::ShapeRef.new(shape: TwoInputs, required: true, location_name: "Inputs")) + Join.add_member(:join_type, Shapes::ShapeRef.new(shape: JoinType, required: true, location_name: "JoinType")) + Join.add_member(:columns, Shapes::ShapeRef.new(shape: JoinColumns, required: true, location_name: "Columns")) + Join.struct_class = Types::Join + + JoinColumn.add_member(:from, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "From")) + JoinColumn.add_member(:keys, Shapes::ShapeRef.new(shape: GlueStudioPathList, required: true, location_name: "Keys")) + JoinColumn.struct_class = Types::JoinColumn + + JoinColumns.member = Shapes::ShapeRef.new(shape: JoinColumn) + JsonClassifier.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name")) JsonClassifier.add_member(:creation_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreationTime")) JsonClassifier.add_member(:last_updated, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastUpdated")) @@ -2792,6 +3193,23 @@ module ClientApi JsonClassifier.add_member(:json_path, Shapes::ShapeRef.new(shape: JsonPath, required: true, location_name: "JsonPath")) JsonClassifier.struct_class = Types::JsonClassifier + KafkaStreamingSourceOptions.add_member(:bootstrap_servers, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "BootstrapServers")) + KafkaStreamingSourceOptions.add_member(:security_protocol, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "SecurityProtocol")) + KafkaStreamingSourceOptions.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "ConnectionName")) + KafkaStreamingSourceOptions.add_member(:topic_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "TopicName")) + KafkaStreamingSourceOptions.add_member(:assign, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Assign")) + KafkaStreamingSourceOptions.add_member(:subscribe_pattern, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "SubscribePattern")) + KafkaStreamingSourceOptions.add_member(:classification, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Classification")) + KafkaStreamingSourceOptions.add_member(:delimiter, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Delimiter")) + KafkaStreamingSourceOptions.add_member(:starting_offsets, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "StartingOffsets")) + KafkaStreamingSourceOptions.add_member(:ending_offsets, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "EndingOffsets")) + KafkaStreamingSourceOptions.add_member(:poll_timeout_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "PollTimeoutMs")) + KafkaStreamingSourceOptions.add_member(:num_retries, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "NumRetries")) + KafkaStreamingSourceOptions.add_member(:retry_interval_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "RetryIntervalMs")) + KafkaStreamingSourceOptions.add_member(:max_offsets_per_trigger, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "MaxOffsetsPerTrigger")) + KafkaStreamingSourceOptions.add_member(:min_partitions, 
Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MinPartitions")) + KafkaStreamingSourceOptions.struct_class = Types::KafkaStreamingSourceOptions + KeyList.member = Shapes::ShapeRef.new(shape: NameString) KeySchemaElement.add_member(:name, Shapes::ShapeRef.new(shape: NameString, required: true, location_name: "Name")) @@ -2800,6 +3218,26 @@ module ClientApi KeySchemaElementList.member = Shapes::ShapeRef.new(shape: KeySchemaElement) + KinesisStreamingSourceOptions.add_member(:endpoint_url, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "EndpointUrl")) + KinesisStreamingSourceOptions.add_member(:stream_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "StreamName")) + KinesisStreamingSourceOptions.add_member(:classification, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Classification")) + KinesisStreamingSourceOptions.add_member(:delimiter, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Delimiter")) + KinesisStreamingSourceOptions.add_member(:starting_position, Shapes::ShapeRef.new(shape: StartingPosition, location_name: "StartingPosition")) + KinesisStreamingSourceOptions.add_member(:max_fetch_time_in_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "MaxFetchTimeInMs")) + KinesisStreamingSourceOptions.add_member(:max_fetch_records_per_shard, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "MaxFetchRecordsPerShard")) + KinesisStreamingSourceOptions.add_member(:max_record_per_read, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "MaxRecordPerRead")) + KinesisStreamingSourceOptions.add_member(:add_idle_time_between_reads, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "AddIdleTimeBetweenReads")) + KinesisStreamingSourceOptions.add_member(:idle_time_between_reads_in_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "IdleTimeBetweenReadsInMs")) + KinesisStreamingSourceOptions.add_member(:describe_shard_interval, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "DescribeShardInterval")) + KinesisStreamingSourceOptions.add_member(:num_retries, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "NumRetries")) + KinesisStreamingSourceOptions.add_member(:retry_interval_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "RetryIntervalMs")) + KinesisStreamingSourceOptions.add_member(:max_retry_interval_ms, Shapes::ShapeRef.new(shape: BoxedNonNegativeLong, location_name: "MaxRetryIntervalMs")) + KinesisStreamingSourceOptions.add_member(:avoid_empty_batches, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "AvoidEmptyBatches")) + KinesisStreamingSourceOptions.add_member(:stream_arn, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "StreamArn")) + KinesisStreamingSourceOptions.add_member(:role_arn, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RoleArn")) + KinesisStreamingSourceOptions.add_member(:role_session_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RoleSessionName")) + KinesisStreamingSourceOptions.struct_class = Types::KinesisStreamingSourceOptions + LabelingSetGenerationTaskRunProperties.add_member(:output_s3_path, Shapes::ShapeRef.new(shape: UriString, location_name: "OutputS3Path")) LabelingSetGenerationTaskRunProperties.struct_class = Types::LabelingSetGenerationTaskRunProperties @@ -2822,6 +3260,10 @@ module ClientApi LastCrawlInfo.add_member(:start_time, 
Shapes::ShapeRef.new(shape: Timestamp, location_name: "StartTime")) LastCrawlInfo.struct_class = Types::LastCrawlInfo + LimitedPathList.member = Shapes::ShapeRef.new(shape: LimitedStringList) + + LimitedStringList.member = Shapes::ShapeRef.new(shape: GenericLimitedString) + LineageConfiguration.add_member(:crawler_lineage_settings, Shapes::ShapeRef.new(shape: CrawlerLineageSettings, location_name: "CrawlerLineageSettings")) LineageConfiguration.struct_class = Types::LineageConfiguration @@ -2988,9 +3430,19 @@ module ClientApi MLUserDataEncryption.add_member(:kms_key_id, Shapes::ShapeRef.new(shape: NameString, location_name: "KmsKeyId")) MLUserDataEncryption.struct_class = Types::MLUserDataEncryption + ManyInputs.member = Shapes::ShapeRef.new(shape: NodeId) + MapValue.key = Shapes::ShapeRef.new(shape: GenericString) MapValue.value = Shapes::ShapeRef.new(shape: GenericString) + Mapping.add_member(:to_key, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "ToKey")) + Mapping.add_member(:from_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, location_name: "FromPath")) + Mapping.add_member(:from_type, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "FromType")) + Mapping.add_member(:to_type, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "ToType")) + Mapping.add_member(:dropped, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Dropped")) + Mapping.add_member(:children, Shapes::ShapeRef.new(shape: Mappings, location_name: "Children")) + Mapping.struct_class = Types::Mapping + MappingEntry.add_member(:source_table, Shapes::ShapeRef.new(shape: TableName, location_name: "SourceTable")) MappingEntry.add_member(:source_path, Shapes::ShapeRef.new(shape: SchemaPathString, location_name: "SourcePath")) MappingEntry.add_member(:source_type, Shapes::ShapeRef.new(shape: FieldType, location_name: "SourceType")) @@ -3001,8 +3453,16 @@ module ClientApi MappingList.member = Shapes::ShapeRef.new(shape: MappingEntry) + Mappings.member = Shapes::ShapeRef.new(shape: Mapping) + MatchCriteria.member = Shapes::ShapeRef.new(shape: NameString) + Merge.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + Merge.add_member(:inputs, Shapes::ShapeRef.new(shape: TwoInputs, required: true, location_name: "Inputs")) + Merge.add_member(:source, Shapes::ShapeRef.new(shape: NodeId, required: true, location_name: "Source")) + Merge.add_member(:primary_keys, Shapes::ShapeRef.new(shape: GlueStudioPathList, required: true, location_name: "PrimaryKeys")) + Merge.struct_class = Types::Merge + MetadataInfo.add_member(:metadata_value, Shapes::ShapeRef.new(shape: MetadataValueString, location_name: "MetadataValue")) MetadataInfo.add_member(:created_time, Shapes::ShapeRef.new(shape: CreatedTimestamp, location_name: "CreatedTime")) MetadataInfo.add_member(:other_metadata_value_list, Shapes::ShapeRef.new(shape: OtherMetadataValueList, location_name: "OtherMetadataValueList")) @@ -3017,6 +3477,17 @@ module ClientApi MetadataList.member = Shapes::ShapeRef.new(shape: MetadataKeyValuePair) + MicrosoftSQLServerCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + MicrosoftSQLServerCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + MicrosoftSQLServerCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + 
MicrosoftSQLServerCatalogSource.struct_class = Types::MicrosoftSQLServerCatalogSource + + MicrosoftSQLServerCatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + MicrosoftSQLServerCatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + MicrosoftSQLServerCatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + MicrosoftSQLServerCatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + MicrosoftSQLServerCatalogTarget.struct_class = Types::MicrosoftSQLServerCatalogTarget + MongoDBTarget.add_member(:connection_name, Shapes::ShapeRef.new(shape: ConnectionName, location_name: "ConnectionName")) MongoDBTarget.add_member(:path, Shapes::ShapeRef.new(shape: Path, location_name: "Path")) MongoDBTarget.add_member(:scan_all, Shapes::ShapeRef.new(shape: NullableBoolean, location_name: "ScanAll")) @@ -3024,6 +3495,17 @@ module ClientApi MongoDBTargetList.member = Shapes::ShapeRef.new(shape: MongoDBTarget) + MySQLCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + MySQLCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + MySQLCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + MySQLCatalogSource.struct_class = Types::MySQLCatalogSource + + MySQLCatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + MySQLCatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + MySQLCatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + MySQLCatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + MySQLCatalogTarget.struct_class = Types::MySQLCatalogTarget + NameStringList.member = Shapes::ShapeRef.new(shape: NameString) NoScheduleException.add_member(:message, Shapes::ShapeRef.new(shape: MessageString, location_name: "Message")) @@ -3044,9 +3526,33 @@ module ClientApi NotificationProperty.add_member(:notify_delay_after, Shapes::ShapeRef.new(shape: NotifyDelayAfter, location_name: "NotifyDelayAfter")) NotificationProperty.struct_class = Types::NotificationProperty + NullCheckBoxList.add_member(:is_empty, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "IsEmpty")) + NullCheckBoxList.add_member(:is_null_string, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "IsNullString")) + NullCheckBoxList.add_member(:is_neg_one, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "IsNegOne")) + NullCheckBoxList.struct_class = Types::NullCheckBoxList + + NullValueField.add_member(:value, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Value")) + NullValueField.add_member(:datatype, Shapes::ShapeRef.new(shape: Datatype, required: true, location_name: "Datatype")) + NullValueField.struct_class = Types::NullValueField + + NullValueFields.member = Shapes::ShapeRef.new(shape: NullValueField) + + OneInput.member = Shapes::ShapeRef.new(shape: NodeId) + OperationTimeoutException.add_member(:message, Shapes::ShapeRef.new(shape: MessageString, 
location_name: "Message")) OperationTimeoutException.struct_class = Types::OperationTimeoutException + OracleSQLCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + OracleSQLCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + OracleSQLCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + OracleSQLCatalogSource.struct_class = Types::OracleSQLCatalogSource + + OracleSQLCatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + OracleSQLCatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + OracleSQLCatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + OracleSQLCatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + OracleSQLCatalogTarget.struct_class = Types::OracleSQLCatalogTarget + OrchestrationArgumentsMap.key = Shapes::ShapeRef.new(shape: OrchestrationNameString) OrchestrationArgumentsMap.value = Shapes::ShapeRef.new(shape: OrchestrationArgumentsValue) @@ -3064,6 +3570,16 @@ module ClientApi OtherMetadataValueListItem.add_member(:created_time, Shapes::ShapeRef.new(shape: CreatedTimestamp, location_name: "CreatedTime")) OtherMetadataValueListItem.struct_class = Types::OtherMetadataValueListItem + PIIDetection.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + PIIDetection.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + PIIDetection.add_member(:pii_type, Shapes::ShapeRef.new(shape: PiiType, required: true, location_name: "PiiType")) + PIIDetection.add_member(:entity_types_to_detect, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "EntityTypesToDetect")) + PIIDetection.add_member(:output_column_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "OutputColumnName")) + PIIDetection.add_member(:sample_fraction, Shapes::ShapeRef.new(shape: BoxedDoubleFraction, location_name: "SampleFraction")) + PIIDetection.add_member(:threshold_fraction, Shapes::ShapeRef.new(shape: BoxedDoubleFraction, location_name: "ThresholdFraction")) + PIIDetection.add_member(:mask_value, Shapes::ShapeRef.new(shape: MaskValue, location_name: "MaskValue")) + PIIDetection.struct_class = Types::PIIDetection + ParametersMap.key = Shapes::ShapeRef.new(shape: KeyString) ParametersMap.value = Shapes::ShapeRef.new(shape: ParametersMapValue) @@ -3126,6 +3642,17 @@ module ClientApi PhysicalConnectionRequirements.add_member(:availability_zone, Shapes::ShapeRef.new(shape: NameString, location_name: "AvailabilityZone")) PhysicalConnectionRequirements.struct_class = Types::PhysicalConnectionRequirements + PostgreSQLCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + PostgreSQLCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + PostgreSQLCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + PostgreSQLCatalogSource.struct_class = Types::PostgreSQLCatalogSource + + 
PostgreSQLCatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + PostgreSQLCatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + PostgreSQLCatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + PostgreSQLCatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + PostgreSQLCatalogTarget.struct_class = Types::PostgreSQLCatalogTarget + Predecessor.add_member(:job_name, Shapes::ShapeRef.new(shape: NameString, location_name: "JobName")) Predecessor.add_member(:run_id, Shapes::ShapeRef.new(shape: IdString, location_name: "RunId")) Predecessor.struct_class = Types::Predecessor @@ -3204,6 +3731,22 @@ module ClientApi RecrawlPolicy.add_member(:recrawl_behavior, Shapes::ShapeRef.new(shape: RecrawlBehavior, location_name: "RecrawlBehavior")) RecrawlPolicy.struct_class = Types::RecrawlPolicy + RedshiftSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + RedshiftSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + RedshiftSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + RedshiftSource.add_member(:redshift_tmp_dir, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RedshiftTmpDir")) + RedshiftSource.add_member(:tmp_dir_iam_role, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "TmpDirIAMRole")) + RedshiftSource.struct_class = Types::RedshiftSource + + RedshiftTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + RedshiftTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + RedshiftTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + RedshiftTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + RedshiftTarget.add_member(:redshift_tmp_dir, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "RedshiftTmpDir")) + RedshiftTarget.add_member(:tmp_dir_iam_role, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "TmpDirIAMRole")) + RedshiftTarget.add_member(:upsert_redshift_options, Shapes::ShapeRef.new(shape: UpsertRedshiftTargetOptions, location_name: "UpsertRedshiftOptions")) + RedshiftTarget.struct_class = Types::RedshiftTarget + RegisterSchemaVersionInput.add_member(:schema_id, Shapes::ShapeRef.new(shape: SchemaId, required: true, location_name: "SchemaId")) RegisterSchemaVersionInput.add_member(:schema_definition, Shapes::ShapeRef.new(shape: SchemaDefinitionString, required: true, location_name: "SchemaDefinition")) RegisterSchemaVersionInput.struct_class = Types::RegisterSchemaVersionInput @@ -3227,6 +3770,11 @@ module ClientApi RegistryListItem.add_member(:updated_time, Shapes::ShapeRef.new(shape: UpdatedTimestamp, location_name: "UpdatedTime")) RegistryListItem.struct_class = Types::RegistryListItem + RelationalCatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + RelationalCatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, 
required: true, location_name: "Database")) + RelationalCatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + RelationalCatalogSource.struct_class = Types::RelationalCatalogSource + RemoveSchemaVersionMetadataInput.add_member(:schema_id, Shapes::ShapeRef.new(shape: SchemaId, location_name: "SchemaId")) RemoveSchemaVersionMetadataInput.add_member(:schema_version_number, Shapes::ShapeRef.new(shape: SchemaVersionNumber, location_name: "SchemaVersionNumber")) RemoveSchemaVersionMetadataInput.add_member(:schema_version_id, Shapes::ShapeRef.new(shape: SchemaVersionIdString, location_name: "SchemaVersionId")) @@ -3243,6 +3791,12 @@ module ClientApi RemoveSchemaVersionMetadataResponse.add_member(:metadata_value, Shapes::ShapeRef.new(shape: MetadataValueString, location_name: "MetadataValue")) RemoveSchemaVersionMetadataResponse.struct_class = Types::RemoveSchemaVersionMetadataResponse + RenameField.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + RenameField.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + RenameField.add_member(:source_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "SourcePath")) + RenameField.add_member(:target_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "TargetPath")) + RenameField.struct_class = Types::RenameField + ResetJobBookmarkRequest.add_member(:job_name, Shapes::ShapeRef.new(shape: JobName, required: true, location_name: "JobName")) ResetJobBookmarkRequest.add_member(:run_id, Shapes::ShapeRef.new(shape: RunId, location_name: "RunId")) ResetJobBookmarkRequest.struct_class = Types::ResetJobBookmarkRequest @@ -3279,12 +3833,103 @@ module ClientApi RunStatementResponse.add_member(:id, Shapes::ShapeRef.new(shape: IntegerValue, location_name: "Id")) RunStatementResponse.struct_class = Types::RunStatementResponse + S3CatalogSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3CatalogSource.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + S3CatalogSource.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + S3CatalogSource.add_member(:partition_predicate, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "PartitionPredicate")) + S3CatalogSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: S3SourceAdditionalOptions, location_name: "AdditionalOptions")) + S3CatalogSource.struct_class = Types::S3CatalogSource + + S3CatalogTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3CatalogTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + S3CatalogTarget.add_member(:partition_keys, Shapes::ShapeRef.new(shape: GlueStudioPathList, location_name: "PartitionKeys")) + S3CatalogTarget.add_member(:table, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Table")) + S3CatalogTarget.add_member(:database, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Database")) + S3CatalogTarget.add_member(:schema_change_policy, Shapes::ShapeRef.new(shape: CatalogSchemaChangePolicy, location_name: "SchemaChangePolicy")) + 
S3CatalogTarget.struct_class = Types::S3CatalogTarget + + S3CsvSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3CsvSource.add_member(:paths, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "Paths")) + S3CsvSource.add_member(:compression_type, Shapes::ShapeRef.new(shape: CompressionType, location_name: "CompressionType")) + S3CsvSource.add_member(:exclusions, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, location_name: "Exclusions")) + S3CsvSource.add_member(:group_size, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "GroupSize")) + S3CsvSource.add_member(:group_files, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "GroupFiles")) + S3CsvSource.add_member(:recurse, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Recurse")) + S3CsvSource.add_member(:max_band, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MaxBand")) + S3CsvSource.add_member(:max_files_in_band, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MaxFilesInBand")) + S3CsvSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: S3DirectSourceAdditionalOptions, location_name: "AdditionalOptions")) + S3CsvSource.add_member(:separator, Shapes::ShapeRef.new(shape: Separator, required: true, location_name: "Separator")) + S3CsvSource.add_member(:escaper, Shapes::ShapeRef.new(shape: EnclosedInStringPropertyWithQuote, location_name: "Escaper")) + S3CsvSource.add_member(:quote_char, Shapes::ShapeRef.new(shape: QuoteChar, required: true, location_name: "QuoteChar")) + S3CsvSource.add_member(:multiline, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Multiline")) + S3CsvSource.add_member(:with_header, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "WithHeader")) + S3CsvSource.add_member(:write_header, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "WriteHeader")) + S3CsvSource.add_member(:skip_first, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "SkipFirst")) + S3CsvSource.add_member(:optimize_performance, Shapes::ShapeRef.new(shape: BooleanValue, location_name: "OptimizePerformance")) + S3CsvSource.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + S3CsvSource.struct_class = Types::S3CsvSource + + S3DirectSourceAdditionalOptions.add_member(:bounded_size, Shapes::ShapeRef.new(shape: BoxedLong, location_name: "BoundedSize")) + S3DirectSourceAdditionalOptions.add_member(:bounded_files, Shapes::ShapeRef.new(shape: BoxedLong, location_name: "BoundedFiles")) + S3DirectSourceAdditionalOptions.add_member(:enable_sample_path, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "EnableSamplePath")) + S3DirectSourceAdditionalOptions.add_member(:sample_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "SamplePath")) + S3DirectSourceAdditionalOptions.struct_class = Types::S3DirectSourceAdditionalOptions + + S3DirectTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3DirectTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + S3DirectTarget.add_member(:partition_keys, Shapes::ShapeRef.new(shape: GlueStudioPathList, location_name: "PartitionKeys")) + S3DirectTarget.add_member(:path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Path")) + S3DirectTarget.add_member(:compression, 
Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "Compression")) + S3DirectTarget.add_member(:format, Shapes::ShapeRef.new(shape: TargetFormat, required: true, location_name: "Format")) + S3DirectTarget.add_member(:schema_change_policy, Shapes::ShapeRef.new(shape: DirectSchemaChangePolicy, location_name: "SchemaChangePolicy")) + S3DirectTarget.struct_class = Types::S3DirectTarget + S3Encryption.add_member(:s3_encryption_mode, Shapes::ShapeRef.new(shape: S3EncryptionMode, location_name: "S3EncryptionMode")) S3Encryption.add_member(:kms_key_arn, Shapes::ShapeRef.new(shape: KmsKeyArn, location_name: "KmsKeyArn")) S3Encryption.struct_class = Types::S3Encryption S3EncryptionList.member = Shapes::ShapeRef.new(shape: S3Encryption) + S3GlueParquetTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3GlueParquetTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + S3GlueParquetTarget.add_member(:partition_keys, Shapes::ShapeRef.new(shape: GlueStudioPathList, location_name: "PartitionKeys")) + S3GlueParquetTarget.add_member(:path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Path")) + S3GlueParquetTarget.add_member(:compression, Shapes::ShapeRef.new(shape: ParquetCompressionType, location_name: "Compression")) + S3GlueParquetTarget.add_member(:schema_change_policy, Shapes::ShapeRef.new(shape: DirectSchemaChangePolicy, location_name: "SchemaChangePolicy")) + S3GlueParquetTarget.struct_class = Types::S3GlueParquetTarget + + S3JsonSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3JsonSource.add_member(:paths, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "Paths")) + S3JsonSource.add_member(:compression_type, Shapes::ShapeRef.new(shape: CompressionType, location_name: "CompressionType")) + S3JsonSource.add_member(:exclusions, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, location_name: "Exclusions")) + S3JsonSource.add_member(:group_size, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "GroupSize")) + S3JsonSource.add_member(:group_files, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "GroupFiles")) + S3JsonSource.add_member(:recurse, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Recurse")) + S3JsonSource.add_member(:max_band, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MaxBand")) + S3JsonSource.add_member(:max_files_in_band, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MaxFilesInBand")) + S3JsonSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: S3DirectSourceAdditionalOptions, location_name: "AdditionalOptions")) + S3JsonSource.add_member(:json_path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "JsonPath")) + S3JsonSource.add_member(:multiline, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Multiline")) + S3JsonSource.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + S3JsonSource.struct_class = Types::S3JsonSource + + S3ParquetSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + S3ParquetSource.add_member(:paths, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, required: true, location_name: "Paths")) + S3ParquetSource.add_member(:compression_type, Shapes::ShapeRef.new(shape: 
ParquetCompressionType, location_name: "CompressionType")) + S3ParquetSource.add_member(:exclusions, Shapes::ShapeRef.new(shape: EnclosedInStringProperties, location_name: "Exclusions")) + S3ParquetSource.add_member(:group_size, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "GroupSize")) + S3ParquetSource.add_member(:group_files, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "GroupFiles")) + S3ParquetSource.add_member(:recurse, Shapes::ShapeRef.new(shape: BoxedBoolean, location_name: "Recurse")) + S3ParquetSource.add_member(:max_band, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MaxBand")) + S3ParquetSource.add_member(:max_files_in_band, Shapes::ShapeRef.new(shape: BoxedNonNegativeInt, location_name: "MaxFilesInBand")) + S3ParquetSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: S3DirectSourceAdditionalOptions, location_name: "AdditionalOptions")) + S3ParquetSource.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + S3ParquetSource.struct_class = Types::S3ParquetSource + + S3SourceAdditionalOptions.add_member(:bounded_size, Shapes::ShapeRef.new(shape: BoxedLong, location_name: "BoundedSize")) + S3SourceAdditionalOptions.add_member(:bounded_files, Shapes::ShapeRef.new(shape: BoxedLong, location_name: "BoundedFiles")) + S3SourceAdditionalOptions.struct_class = Types::S3SourceAdditionalOptions + S3Target.add_member(:path, Shapes::ShapeRef.new(shape: Path, location_name: "Path")) S3Target.add_member(:exclusions, Shapes::ShapeRef.new(shape: PathList, location_name: "Exclusions")) S3Target.add_member(:connection_name, Shapes::ShapeRef.new(shape: ConnectionName, location_name: "ConnectionName")) @@ -3384,6 +4029,16 @@ module ClientApi Segment.add_member(:total_segments, Shapes::ShapeRef.new(shape: TotalSegmentsInteger, required: true, location_name: "TotalSegments")) Segment.struct_class = Types::Segment + SelectFields.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + SelectFields.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + SelectFields.add_member(:paths, Shapes::ShapeRef.new(shape: GlueStudioPathList, required: true, location_name: "Paths")) + SelectFields.struct_class = Types::SelectFields + + SelectFromCollection.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + SelectFromCollection.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + SelectFromCollection.add_member(:index, Shapes::ShapeRef.new(shape: NonNegativeInt, required: true, location_name: "Index")) + SelectFromCollection.struct_class = Types::SelectFromCollection + SerDeInfo.add_member(:name, Shapes::ShapeRef.new(shape: NameString, location_name: "Name")) SerDeInfo.add_member(:serialization_library, Shapes::ShapeRef.new(shape: NameString, location_name: "SerializationLibrary")) SerDeInfo.add_member(:parameters, Shapes::ShapeRef.new(shape: ParametersMap, location_name: "Parameters")) @@ -3423,6 +4078,48 @@ module ClientApi SortCriterion.add_member(:sort, Shapes::ShapeRef.new(shape: Sort, location_name: "Sort")) SortCriterion.struct_class = Types::SortCriterion + SparkConnectorSource.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + SparkConnectorSource.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, 
location_name: "ConnectionName")) + SparkConnectorSource.add_member(:connector_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectorName")) + SparkConnectorSource.add_member(:connection_type, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionType")) + SparkConnectorSource.add_member(:additional_options, Shapes::ShapeRef.new(shape: AdditionalOptions, location_name: "AdditionalOptions")) + SparkConnectorSource.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + SparkConnectorSource.struct_class = Types::SparkConnectorSource + + SparkConnectorTarget.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + SparkConnectorTarget.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + SparkConnectorTarget.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionName")) + SparkConnectorTarget.add_member(:connector_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectorName")) + SparkConnectorTarget.add_member(:connection_type, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "ConnectionType")) + SparkConnectorTarget.add_member(:additional_options, Shapes::ShapeRef.new(shape: AdditionalOptions, location_name: "AdditionalOptions")) + SparkConnectorTarget.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + SparkConnectorTarget.struct_class = Types::SparkConnectorTarget + + SparkSQL.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + SparkSQL.add_member(:inputs, Shapes::ShapeRef.new(shape: ManyInputs, required: true, location_name: "Inputs")) + SparkSQL.add_member(:sql_query, Shapes::ShapeRef.new(shape: SqlQuery, required: true, location_name: "SqlQuery")) + SparkSQL.add_member(:sql_aliases, Shapes::ShapeRef.new(shape: SqlAliases, required: true, location_name: "SqlAliases")) + SparkSQL.add_member(:output_schemas, Shapes::ShapeRef.new(shape: GlueSchemas, location_name: "OutputSchemas")) + SparkSQL.struct_class = Types::SparkSQL + + Spigot.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + Spigot.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + Spigot.add_member(:path, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, required: true, location_name: "Path")) + Spigot.add_member(:topk, Shapes::ShapeRef.new(shape: Topk, location_name: "Topk")) + Spigot.add_member(:prob, Shapes::ShapeRef.new(shape: Prob, location_name: "Prob")) + Spigot.struct_class = Types::Spigot + + SplitFields.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + SplitFields.add_member(:inputs, Shapes::ShapeRef.new(shape: OneInput, required: true, location_name: "Inputs")) + SplitFields.add_member(:paths, Shapes::ShapeRef.new(shape: GlueStudioPathList, required: true, location_name: "Paths")) + SplitFields.struct_class = Types::SplitFields + + SqlAlias.add_member(:from, Shapes::ShapeRef.new(shape: NodeId, required: true, location_name: "From")) + SqlAlias.add_member(:alias, Shapes::ShapeRef.new(shape: EnclosedInStringPropertyWithQuote, required: true, location_name: "Alias")) + SqlAlias.struct_class = Types::SqlAlias + 
+ SqlAliases.member = Shapes::ShapeRef.new(shape: SqlAlias) + StartBlueprintRunRequest.add_member(:blueprint_name, Shapes::ShapeRef.new(shape: OrchestrationNameString, required: true, location_name: "BlueprintName")) StartBlueprintRunRequest.add_member(:parameters, Shapes::ShapeRef.new(shape: BlueprintParameters, location_name: "Parameters")) StartBlueprintRunRequest.add_member(:role_arn, Shapes::ShapeRef.new(shape: OrchestrationIAMRoleArn, required: true, location_name: "RoleArn")) @@ -3568,6 +4265,10 @@ module ClientApi StorageDescriptor.add_member(:schema_reference, Shapes::ShapeRef.new(shape: SchemaReference, location_name: "SchemaReference")) StorageDescriptor.struct_class = Types::StorageDescriptor + StreamingDataPreviewOptions.add_member(:polling_time, Shapes::ShapeRef.new(shape: PollingTime, location_name: "PollingTime")) + StreamingDataPreviewOptions.add_member(:record_polling_limit, Shapes::ShapeRef.new(shape: PositiveLong, location_name: "RecordPollingLimit")) + StreamingDataPreviewOptions.struct_class = Types::StreamingDataPreviewOptions + StringColumnStatisticsData.add_member(:maximum_length, Shapes::ShapeRef.new(shape: NonNegativeLong, required: true, location_name: "MaximumLength")) StringColumnStatisticsData.add_member(:average_length, Shapes::ShapeRef.new(shape: NonNegativeDouble, required: true, location_name: "AverageLength")) StringColumnStatisticsData.add_member(:number_of_nulls, Shapes::ShapeRef.new(shape: NonNegativeLong, required: true, location_name: "NumberOfNulls")) @@ -3735,6 +4436,8 @@ module ClientApi TriggerUpdate.add_member(:event_batching_condition, Shapes::ShapeRef.new(shape: EventBatchingCondition, location_name: "EventBatchingCondition")) TriggerUpdate.struct_class = Types::TriggerUpdate + TwoInputs.member = Shapes::ShapeRef.new(shape: NodeId) + UnfilteredPartition.add_member(:partition, Shapes::ShapeRef.new(shape: Partition, location_name: "Partition")) UnfilteredPartition.add_member(:authorized_columns, Shapes::ShapeRef.new(shape: NameStringList, location_name: "AuthorizedColumns")) UnfilteredPartition.add_member(:is_registered_with_lake_formation, Shapes::ShapeRef.new(shape: Boolean, location_name: "IsRegisteredWithLakeFormation")) @@ -3742,6 +4445,11 @@ module ClientApi UnfilteredPartitionList.member = Shapes::ShapeRef.new(shape: UnfilteredPartition) + Union.add_member(:name, Shapes::ShapeRef.new(shape: NodeName, required: true, location_name: "Name")) + Union.add_member(:inputs, Shapes::ShapeRef.new(shape: TwoInputs, required: true, location_name: "Inputs")) + Union.add_member(:union_type, Shapes::ShapeRef.new(shape: UnionType, required: true, location_name: "UnionType")) + Union.struct_class = Types::Union + UntagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: GlueResourceArn, required: true, location_name: "ResourceArn")) UntagResourceRequest.add_member(:tags_to_remove, Shapes::ShapeRef.new(shape: TagKeysList, required: true, location_name: "TagsToRemove")) UntagResourceRequest.struct_class = Types::UntagResourceRequest @@ -3944,6 +4652,11 @@ module ClientApi UpdateXMLClassifierRequest.add_member(:row_tag, Shapes::ShapeRef.new(shape: RowTag, location_name: "RowTag")) UpdateXMLClassifierRequest.struct_class = Types::UpdateXMLClassifierRequest + UpsertRedshiftTargetOptions.add_member(:table_location, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: "TableLocation")) + UpsertRedshiftTargetOptions.add_member(:connection_name, Shapes::ShapeRef.new(shape: EnclosedInStringProperty, location_name: 
"ConnectionName")) + UpsertRedshiftTargetOptions.add_member(:upsert_keys, Shapes::ShapeRef.new(shape: EnclosedInStringPropertiesMinOne, location_name: "UpsertKeys")) + UpsertRedshiftTargetOptions.struct_class = Types::UpsertRedshiftTargetOptions + UserDefinedFunction.add_member(:function_name, Shapes::ShapeRef.new(shape: NameString, location_name: "FunctionName")) UserDefinedFunction.add_member(:database_name, Shapes::ShapeRef.new(shape: NameString, location_name: "DatabaseName")) UserDefinedFunction.add_member(:class_name, Shapes::ShapeRef.new(shape: NameString, location_name: "ClassName")) diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb index d4d5530f4ce..ec2c1e65a02 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb @@ -100,6 +100,88 @@ class Action < Struct.new( include Aws::Structure end + # Specifies a transform that groups rows by chosen fields and computes + # the aggregated value by specified function. + # + # @note When making an API call, you may pass Aggregate + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # Specifies the fields and rows to use as inputs for the aggregate + # transform. + # @return [Array] + # + # @!attribute [rw] groups + # Specifies the fields to group by. + # @return [Array>] + # + # @!attribute [rw] aggs + # Specifies the aggregate functions to be performed on specified + # fields. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Aggregate AWS API Documentation + # + class Aggregate < Struct.new( + :name, + :inputs, + :groups, + :aggs) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies the set of parameters needed to perform aggregation in the + # aggregate transform. + # + # @note When making an API call, you may pass AggregateOperation + # data as a hash: + # + # { + # column: ["EnclosedInStringProperty"], # required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # } + # + # @!attribute [rw] column + # Specifies the column on the data set on which the aggregation + # function will be applied. + # @return [Array] + # + # @!attribute [rw] agg_func + # Specifies the aggregation function to apply. + # + # Possible aggregation functions include: avg countDistinct, count, + # first, last, kurtosis, max, min, skewness, stddev\_samp, + # stddev\_pop, sum, sumDistinct, var\_samp, var\_pop + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/AggregateOperation AWS API Documentation + # + class AggregateOperation < Struct.new( + :column, + :agg_func) + SENSITIVE = [] + include Aws::Structure + end + # A resource to be created or added already exists. 
# # @!attribute [rw] message @@ -114,6 +196,123 @@ class AlreadyExistsException < Struct.new( include Aws::Structure end + # Specifies a transform that maps data property keys in the data source + # to data property keys in the data target. You can rename keys, modify + # the data types for keys, and choose which keys to drop from the + # dataset. + # + # @note When making an API call, you may pass ApplyMapping + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] mapping + # Specifies the mapping of data property keys in the data source to + # data property keys in the data target. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ApplyMapping AWS API Documentation + # + class ApplyMapping < Struct.new( + :name, + :inputs, + :mapping) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a connector to an Amazon Athena data source. + # + # @note When making an API call, you may pass AthenaConnectorSource + # data as a hash: + # + # { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the data source. + # @return [String] + # + # @!attribute [rw] connection_name + # The name of the connection that is associated with the connector. + # @return [String] + # + # @!attribute [rw] connector_name + # The name of a connector that assists with accessing the data store + # in Glue Studio. + # @return [String] + # + # @!attribute [rw] connection_type + # The type of connection, such as marketplace.athena or custom.athena, + # designating a connection to an Amazon Athena data store. + # @return [String] + # + # @!attribute [rw] connection_table + # The name of the table in the data source. + # @return [String] + # + # @!attribute [rw] schema_name + # The name of the Cloudwatch log group to read from. For example, + # `/aws-glue/jobs/output`. + # @return [String] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the custom Athena source. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/AthenaConnectorSource AWS API Documentation + # + class AthenaConnectorSource < Struct.new( + :name, + :connection_name, + :connector_name, + :connection_type, + :connection_table, + :schema_name, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + # A structure containing information for audit. 
# # @note When making an API call, you may pass AuditContext @@ -186,6 +385,47 @@ class BackfillError < Struct.new( include Aws::Structure end + # Specifies a target that uses a Glue Data Catalog table. + # + # @note When making an API call, you may pass BasicCatalogTarget + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # } + # + # @!attribute [rw] name + # The name of your data target. + # @return [String] + # + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] + # + # @!attribute [rw] database + # The database that contains the table you want to use as the target. + # This database must already exist in the Data Catalog. + # @return [String] + # + # @!attribute [rw] table + # The table that defines the schema of your output data. This table + # must already exist in the Data Catalog. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BasicCatalogTarget AWS API Documentation + # + class BasicCatalogTarget < Struct.new( + :name, + :inputs, + :database, + :table) + SENSITIVE = [] + include Aws::Structure + end + # @note When making an API call, you may pass BatchCreatePartitionRequest # data as a hash: # @@ -1530,6 +1770,225 @@ class CatalogImportStatus < Struct.new( include Aws::Structure end + # Specifies an Apache Kafka data store in the Data Catalog. + # + # @note When making an API call, you may pass CatalogKafkaSource + # data as a hash: + # + # { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # } + # + # @!attribute [rw] name + # The name of the data store. + # @return [String] + # + # @!attribute [rw] window_size + # The amount of time to spend processing each micro batch. + # @return [Integer] + # + # @!attribute [rw] detect_schema + # Whether to automatically determine the schema from the incoming + # data. + # @return [Boolean] + # + # @!attribute [rw] table + # The name of the table in the database to read from. + # @return [String] + # + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] + # + # @!attribute [rw] streaming_options + # Specifies the streaming options. + # @return [Types::KafkaStreamingSourceOptions] + # + # @!attribute [rw] data_preview_options + # Specifies options related to data preview for viewing a sample of + # your data. 
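+ #
+ # For example (hypothetical values), a preview configuration that bounds
+ # both the polling time and the number of records returned:
+ #
+ #     data_preview_options: {
+ #       polling_time: 10,
+ #       record_polling_limit: 100,
+ #     }
+ #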
+ # @return [Types::StreamingDataPreviewOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CatalogKafkaSource AWS API Documentation + # + class CatalogKafkaSource < Struct.new( + :name, + :window_size, + :detect_schema, + :table, + :database, + :streaming_options, + :data_preview_options) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a Kinesis data source in the Glue Data Catalog. + # + # @note When making an API call, you may pass CatalogKinesisSource + # data as a hash: + # + # { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # } + # + # @!attribute [rw] name + # The name of the data source. + # @return [String] + # + # @!attribute [rw] window_size + # The amount of time to spend processing each micro batch. + # @return [Integer] + # + # @!attribute [rw] detect_schema + # Whether to automatically determine the schema from the incoming + # data. + # @return [Boolean] + # + # @!attribute [rw] table + # The name of the table in the database to read from. + # @return [String] + # + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] + # + # @!attribute [rw] streaming_options + # Additional options for the Kinesis streaming data source. + # @return [Types::KinesisStreamingSourceOptions] + # + # @!attribute [rw] data_preview_options + # Additional options for data preview. + # @return [Types::StreamingDataPreviewOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CatalogKinesisSource AWS API Documentation + # + class CatalogKinesisSource < Struct.new( + :name, + :window_size, + :detect_schema, + :table, + :database, + :streaming_options, + :data_preview_options) + SENSITIVE = [] + include Aws::Structure + end + + # A policy that specifies update behavior for the crawler. + # + # @note When making an API call, you may pass CatalogSchemaChangePolicy + # data as a hash: + # + # { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # } + # + # @!attribute [rw] enable_update_catalog + # Whether to use the specified update behavior when the crawler finds + # a changed schema. + # @return [Boolean] + # + # @!attribute [rw] update_behavior + # The update behavior when the crawler finds a changed schema. 
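+ #
+ # For example (a hypothetical choice of behavior), a policy that updates
+ # the Data Catalog in place when a schema change is detected:
+ #
+ #     {
+ #       enable_update_catalog: true,
+ #       update_behavior: "UPDATE_IN_DATABASE",
+ #     }
+ #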
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CatalogSchemaChangePolicy AWS API Documentation + # + class CatalogSchemaChangePolicy < Struct.new( + :enable_update_catalog, + :update_behavior) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a data store in the Glue Data Catalog. + # + # @note When making an API call, you may pass CatalogSource + # data as a hash: + # + # { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # } + # + # @!attribute [rw] name + # The name of the data store. + # @return [String] + # + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] + # + # @!attribute [rw] table + # The name of the table in the database to read from. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CatalogSource AWS API Documentation + # + class CatalogSource < Struct.new( + :name, + :database, + :table) + SENSITIVE = [] + include Aws::Structure + end + # Specifies an Glue Data Catalog target. # # @note When making an API call, you may pass CatalogTarget @@ -1675,101 +2134,1067 @@ class CloudWatchEncryption < Struct.new( include Aws::Structure end - # Represents a directional edge in a directed acyclic graph (DAG). + # `CodeGenConfigurationNode` enumerates all valid Node types. One and + # only one of its member variables can be populated. # - # @note When making an API call, you may pass CodeGenEdge + # @note When making an API call, you may pass CodeGenConfigurationNode # data as a hash: # # { - # source: "CodeGenIdentifier", # required - # target: "CodeGenIdentifier", # required - # target_parameter: "CodeGenArgName", - # } - # - # @!attribute [rw] source - # The ID of the node at which the edge starts. - # @return [String] - # - # @!attribute [rw] target - # The ID of the node at which the edge ends. - # @return [String] - # - # @!attribute [rw] target_parameter - # The target of the edge. 
- # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenEdge AWS API Documentation - # - class CodeGenEdge < Struct.new( - :source, - :target, - :target_parameter) - SENSITIVE = [] - include Aws::Structure - end - - # Represents a node in a directed acyclic graph (DAG) - # - # @note When making an API call, you may pass CodeGenNode - # data as a hash: - # - # { - # id: "CodeGenIdentifier", # required - # node_type: "CodeGenNodeType", # required - # args: [ # required - # { - # name: "CodeGenArgName", # required - # value: "CodeGenArgValue", # required - # param: false, + # athena_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # jdbc_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, # }, - # ], - # line_number: 1, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # }, + # s3_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # s3_csv_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: 
"EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_json_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_parquet_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # relational_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # dynamo_db_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # jdbc_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => 
"EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, + # }, + # s3_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # s3_glue_parquet_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # s3_direct_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # apply_mapping: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # }, + # select_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # drop_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # rename_field: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source_path: ["EnclosedInStringProperty"], # required + # target_path: ["EnclosedInStringProperty"], # required + # }, + # spigot: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, + # }, + # join: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # 
required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], + # }, + # split_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # select_from_collection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # }, + # fill_missing_values: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # }, + # filter: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # }, + # custom_code: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_sql: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # direct_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # direct_kafka_source: { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # 
max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kafka_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # drop_null_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # }, + # merge: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # union: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # }, + # pii_detection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # required + # output_column_name: "EnclosedInStringProperty", + # sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # }, + # aggregate: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # 
required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # }, + # ], + # }, + # drop_duplicates: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # }, + # governed_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # governed_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # microsoft_sql_server_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # microsoft_sql_server_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, # } # - # @!attribute [rw] id - # A node identifier that is unique within the node's graph. - # @return [String] + # @!attribute [rw] athena_connector_source + # Specifies a connector to an Amazon Athena data source. + # @return [Types::AthenaConnectorSource] # - # @!attribute [rw] node_type - # The type of node that this is. - # @return [String] + # @!attribute [rw] jdbc_connector_source + # Specifies a connector to a JDBC data source. + # @return [Types::JDBCConnectorSource] # - # @!attribute [rw] args - # Properties of the node, in the form of name-value pairs. - # @return [Array] + # @!attribute [rw] spark_connector_source + # Specifies a connector to an Apache Spark data source. + # @return [Types::SparkConnectorSource] # - # @!attribute [rw] line_number - # The line number of the node. - # @return [Integer] + # @!attribute [rw] catalog_source + # Specifies a data store in the Glue Data Catalog. 
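+ #
+ # Because only one member of a `CodeGenConfigurationNode` can be populated,
+ # a node that reads from the Data Catalog sets just this member; other
+ # nodes then refer to it by its node ID through their `inputs`. A sketch
+ # with hypothetical database and table names:
+ #
+ #     {
+ #       catalog_source: {
+ #         name: "ReadOrders",
+ #         database: "sales_db",
+ #         table: "orders",
+ #       },
+ #     }
+ #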
+ # @return [Types::CatalogSource] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenNode AWS API Documentation + # @!attribute [rw] redshift_source + # Specifies an Amazon Redshift data store. + # @return [Types::RedshiftSource] # - class CodeGenNode < Struct.new( - :id, - :node_type, - :args, - :line_number) + # @!attribute [rw] s3_catalog_source + # Specifies an Amazon S3 data store in the Glue Data Catalog. + # @return [Types::S3CatalogSource] + # + # @!attribute [rw] s3_csv_source + # Specifies a command-separated value (CSV) data store stored in + # Amazon S3. + # @return [Types::S3CsvSource] + # + # @!attribute [rw] s3_json_source + # Specifies a JSON data store stored in Amazon S3. + # @return [Types::S3JsonSource] + # + # @!attribute [rw] s3_parquet_source + # Specifies an Apache Parquet data store stored in Amazon S3. + # @return [Types::S3ParquetSource] + # + # @!attribute [rw] relational_catalog_source + # Specifies a Relational database data source in the Glue Data + # Catalog. + # @return [Types::RelationalCatalogSource] + # + # @!attribute [rw] dynamo_db_catalog_source + # Specifies a DynamoDB data source in the Glue Data Catalog. + # @return [Types::DynamoDBCatalogSource] + # + # @!attribute [rw] jdbc_connector_target + # Specifies a data target that writes to Amazon S3 in Apache Parquet + # columnar storage. + # @return [Types::JDBCConnectorTarget] + # + # @!attribute [rw] spark_connector_target + # Specifies a target that uses an Apache Spark connector. + # @return [Types::SparkConnectorTarget] + # + # @!attribute [rw] catalog_target + # Specifies a target that uses a Glue Data Catalog table. + # @return [Types::BasicCatalogTarget] + # + # @!attribute [rw] redshift_target + # Specifies a target that uses Amazon Redshift. + # @return [Types::RedshiftTarget] + # + # @!attribute [rw] s3_catalog_target + # Specifies a data target that writes to Amazon S3 using the Glue Data + # Catalog. + # @return [Types::S3CatalogTarget] + # + # @!attribute [rw] s3_glue_parquet_target + # Specifies a data target that writes to Amazon S3 in Apache Parquet + # columnar storage. + # @return [Types::S3GlueParquetTarget] + # + # @!attribute [rw] s3_direct_target + # Specifies a data target that writes to Amazon S3. + # @return [Types::S3DirectTarget] + # + # @!attribute [rw] apply_mapping + # Specifies a transform that maps data property keys in the data + # source to data property keys in the data target. You can rename + # keys, modify the data types for keys, and choose which keys to drop + # from the dataset. + # @return [Types::ApplyMapping] + # + # @!attribute [rw] select_fields + # Specifies a transform that chooses the data property keys that you + # want to keep. + # @return [Types::SelectFields] + # + # @!attribute [rw] drop_fields + # Specifies a transform that chooses the data property keys that you + # want to drop. + # @return [Types::DropFields] + # + # @!attribute [rw] rename_field + # Specifies a transform that renames a single data property key. + # @return [Types::RenameField] + # + # @!attribute [rw] spigot + # Specifies a transform that writes samples of the data to an Amazon + # S3 bucket. + # @return [Types::Spigot] + # + # @!attribute [rw] join + # Specifies a transform that joins two datasets into one dataset using + # a comparison phrase on the specified data property keys. You can use + # inner, outer, left, right, left semi, and left anti joins. 
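+ #
+ # As a sketch (node IDs and key names are hypothetical), an equijoin of two
+ # input nodes on a shared key might be configured as:
+ #
+ #     join: {
+ #       name: "JoinOrdersToCustomers",
+ #       inputs: ["orders-node", "customers-node"],
+ #       join_type: "equijoin",
+ #       columns: [
+ #         { from: "orders-node", keys: [["customer_id"]] },
+ #         { from: "customers-node", keys: [["customer_id"]] },
+ #       ],
+ #     }
+ #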
+ # @return [Types::Join] + # + # @!attribute [rw] split_fields + # Specifies a transform that splits data property keys into two + # `DynamicFrames`. The output is a collection of `DynamicFrames`\: one + # with selected data property keys, and one with the remaining data + # property keys. + # @return [Types::SplitFields] + # + # @!attribute [rw] select_from_collection + # Specifies a transform that chooses one `DynamicFrame` from a + # collection of `DynamicFrames`. The output is the selected + # `DynamicFrame` + # @return [Types::SelectFromCollection] + # + # @!attribute [rw] fill_missing_values + # Specifies a transform that locates records in the dataset that have + # missing values and adds a new field with a value determined by + # imputation. The input data set is used to train the machine learning + # model that determines what the missing value should be. + # @return [Types::FillMissingValues] + # + # @!attribute [rw] filter + # Specifies a transform that splits a dataset into two, based on a + # filter condition. + # @return [Types::Filter] + # + # @!attribute [rw] custom_code + # Specifies a transform that uses custom code you provide to perform + # the data transformation. The output is a collection of + # DynamicFrames. + # @return [Types::CustomCode] + # + # @!attribute [rw] spark_sql + # Specifies a transform where you enter a SQL query using Spark SQL + # syntax to transform the data. The output is a single `DynamicFrame`. + # @return [Types::SparkSQL] + # + # @!attribute [rw] direct_kinesis_source + # Specifies a direct Amazon Kinesis data source. + # @return [Types::DirectKinesisSource] + # + # @!attribute [rw] direct_kafka_source + # Specifies an Apache Kafka data store. + # @return [Types::DirectKafkaSource] + # + # @!attribute [rw] catalog_kinesis_source + # Specifies a Kinesis data source in the Glue Data Catalog. + # @return [Types::CatalogKinesisSource] + # + # @!attribute [rw] catalog_kafka_source + # Specifies an Apache Kafka data store in the Data Catalog. + # @return [Types::CatalogKafkaSource] + # + # @!attribute [rw] drop_null_fields + # Specifies a transform that removes columns from the dataset if all + # values in the column are 'null'. By default, Glue Studio will + # recognize null objects, but some values such as empty strings, + # strings that are "null", -1 integers or other placeholders such as + # zeros, are not automatically recognized as nulls. + # @return [Types::DropNullFields] + # + # @!attribute [rw] merge + # Specifies a transform that merges a `DynamicFrame` with a staging + # `DynamicFrame` based on the specified primary keys to identify + # records. Duplicate records (records with the same primary keys) are + # not de-duplicated. + # @return [Types::Merge] + # + # @!attribute [rw] union + # Specifies a transform that combines the rows from two or more + # datasets into a single result. + # @return [Types::Union] + # + # @!attribute [rw] pii_detection + # Specifies a transform that identifies, removes or masks PII data. + # @return [Types::PIIDetection] + # + # @!attribute [rw] aggregate + # Specifies a transform that groups rows by chosen fields and computes + # the aggregated value by specified function. + # @return [Types::Aggregate] + # + # @!attribute [rw] drop_duplicates + # Specifies a transform that removes rows of repeating data from a + # data set. + # @return [Types::DropDuplicates] + # + # @!attribute [rw] governed_catalog_target + # Specifies a data target that writes to a goverened catalog. 
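+ #
+ # For example (hypothetical names), a governed Data Catalog target that
+ # partitions output by one key and updates the catalog in place:
+ #
+ #     governed_catalog_target: {
+ #       name: "WriteGovernedOrders",
+ #       inputs: ["transform-node"],
+ #       partition_keys: [["region"]],
+ #       database: "sales_db",
+ #       table: "orders_governed",
+ #       schema_change_policy: {
+ #         enable_update_catalog: true,
+ #         update_behavior: "UPDATE_IN_DATABASE",
+ #       },
+ #     }
+ #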
+ # @return [Types::GovernedCatalogTarget] + # + # @!attribute [rw] governed_catalog_source + # Specifies a data source in a goverened Data Catalog. + # @return [Types::GovernedCatalogSource] + # + # @!attribute [rw] microsoft_sql_server_catalog_source + # Specifies a Microsoft SQL server data source in the Glue Data + # Catalog. + # @return [Types::MicrosoftSQLServerCatalogSource] + # + # @!attribute [rw] my_sql_catalog_source + # Specifies a MySQL data source in the Glue Data Catalog. + # @return [Types::MySQLCatalogSource] + # + # @!attribute [rw] oracle_sql_catalog_source + # Specifies an Oracle data source in the Glue Data Catalog. + # @return [Types::OracleSQLCatalogSource] + # + # @!attribute [rw] postgre_sql_catalog_source + # Specifies a PostgresSQL data source in the Glue Data Catalog. + # @return [Types::PostgreSQLCatalogSource] + # + # @!attribute [rw] microsoft_sql_server_catalog_target + # Specifies a target that uses Microsoft SQL. + # @return [Types::MicrosoftSQLServerCatalogTarget] + # + # @!attribute [rw] my_sql_catalog_target + # Specifies a target that uses MySQL. + # @return [Types::MySQLCatalogTarget] + # + # @!attribute [rw] oracle_sql_catalog_target + # Specifies a target that uses Oracle SQL. + # @return [Types::OracleSQLCatalogTarget] + # + # @!attribute [rw] postgre_sql_catalog_target + # Specifies a target that uses Postgres SQL. + # @return [Types::PostgreSQLCatalogTarget] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenConfigurationNode AWS API Documentation + # + class CodeGenConfigurationNode < Struct.new( + :athena_connector_source, + :jdbc_connector_source, + :spark_connector_source, + :catalog_source, + :redshift_source, + :s3_catalog_source, + :s3_csv_source, + :s3_json_source, + :s3_parquet_source, + :relational_catalog_source, + :dynamo_db_catalog_source, + :jdbc_connector_target, + :spark_connector_target, + :catalog_target, + :redshift_target, + :s3_catalog_target, + :s3_glue_parquet_target, + :s3_direct_target, + :apply_mapping, + :select_fields, + :drop_fields, + :rename_field, + :spigot, + :join, + :split_fields, + :select_from_collection, + :fill_missing_values, + :filter, + :custom_code, + :spark_sql, + :direct_kinesis_source, + :direct_kafka_source, + :catalog_kinesis_source, + :catalog_kafka_source, + :drop_null_fields, + :merge, + :union, + :pii_detection, + :aggregate, + :drop_duplicates, + :governed_catalog_target, + :governed_catalog_source, + :microsoft_sql_server_catalog_source, + :my_sql_catalog_source, + :oracle_sql_catalog_source, + :postgre_sql_catalog_source, + :microsoft_sql_server_catalog_target, + :my_sql_catalog_target, + :oracle_sql_catalog_target, + :postgre_sql_catalog_target) SENSITIVE = [] include Aws::Structure end - # An argument or property of a node. + # Represents a directional edge in a directed acyclic graph (DAG). # - # @note When making an API call, you may pass CodeGenNodeArg + # @note When making an API call, you may pass CodeGenEdge # data as a hash: # # { - # name: "CodeGenArgName", # required - # value: "CodeGenArgValue", # required - # param: false, + # source: "CodeGenIdentifier", # required + # target: "CodeGenIdentifier", # required + # target_parameter: "CodeGenArgName", # } # - # @!attribute [rw] name - # The name of the argument or property. + # @!attribute [rw] source + # The ID of the node at which the edge starts. # @return [String] # - # @!attribute [rw] value - # The value of the argument or property. 
+ # @!attribute [rw] target + # The ID of the node at which the edge ends. + # @return [String] + # + # @!attribute [rw] target_parameter + # The target of the edge. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenEdge AWS API Documentation + # + class CodeGenEdge < Struct.new( + :source, + :target, + :target_parameter) + SENSITIVE = [] + include Aws::Structure + end + + # Represents a node in a directed acyclic graph (DAG) + # + # @note When making an API call, you may pass CodeGenNode + # data as a hash: + # + # { + # id: "CodeGenIdentifier", # required + # node_type: "CodeGenNodeType", # required + # args: [ # required + # { + # name: "CodeGenArgName", # required + # value: "CodeGenArgValue", # required + # param: false, + # }, + # ], + # line_number: 1, + # } + # + # @!attribute [rw] id + # A node identifier that is unique within the node's graph. + # @return [String] + # + # @!attribute [rw] node_type + # The type of node that this is. + # @return [String] + # + # @!attribute [rw] args + # Properties of the node, in the form of name-value pairs. + # @return [Array] + # + # @!attribute [rw] line_number + # The line number of the node. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CodeGenNode AWS API Documentation + # + class CodeGenNode < Struct.new( + :id, + :node_type, + :args, + :line_number) + SENSITIVE = [] + include Aws::Structure + end + + # An argument or property of a node. + # + # @note When making an API call, you may pass CodeGenNodeArg + # data as a hash: + # + # { + # name: "CodeGenArgName", # required + # value: "CodeGenArgValue", # required + # param: false, + # } + # + # @!attribute [rw] name + # The name of the argument or property. + # @return [String] + # + # @!attribute [rw] value + # The value of the argument or property. 
# @return [String] # # @!attribute [rw] param @@ -3830,6 +5255,674 @@ class CreateGrokClassifierRequest < Struct.new( # glue_version: "GlueVersionString", # number_of_workers: 1, # worker_type: "Standard", # accepts Standard, G.1X, G.2X + # code_gen_configuration_nodes: { + # "NodeId" => { + # athena_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # jdbc_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # }, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # }, + # s3_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # s3_csv_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, 
semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_json_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_parquet_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # relational_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # dynamo_db_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # jdbc_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: 
"EnclosedInStringProperty", # required + # }, + # redshift_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, + # }, + # s3_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # s3_glue_parquet_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # s3_direct_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # apply_mapping: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # }, + # select_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # drop_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # rename_field: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source_path: ["EnclosedInStringProperty"], # required + # target_path: ["EnclosedInStringProperty"], # required + # }, + # spigot: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, + # }, + # join: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], + # }, + # split_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # 
["EnclosedInStringProperty"], + # ], + # }, + # select_from_collection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # }, + # fill_missing_values: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # }, + # filter: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # }, + # custom_code: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_sql: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # direct_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # direct_kafka_source: { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: 
"EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kafka_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # drop_null_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # }, + # merge: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # union: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # }, + # pii_detection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # required + # output_column_name: "EnclosedInStringProperty", + # sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # }, + # aggregate: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # }, + # ], + # }, + # drop_duplicates: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # }, + # 
governed_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # governed_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # microsoft_sql_server_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # microsoft_sql_server_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # }, + # }, # } # # @!attribute [rw] name @@ -4004,6 +6097,12 @@ class CreateGrokClassifierRequest < Struct.new( # recommend this worker type for memory-intensive jobs. # @return [String] # + # @!attribute [rw] code_gen_configuration_nodes + # The representation of a directed acyclic graph on which both the + # Glue Studio visual component and Glue Studio code generation is + # based. + # @return [Hash] + # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CreateJobRequest AWS API Documentation # class CreateJobRequest < Struct.new( @@ -4025,8 +6124,9 @@ class CreateJobRequest < Struct.new( :notification_property, :glue_version, :number_of_workers, - :worker_type) - SENSITIVE = [] + :worker_type, + :code_gen_configuration_nodes) + SENSITIVE = [:code_gen_configuration_nodes] include Aws::Structure end @@ -5442,23 +7542,78 @@ class CsvClassifier < Struct.new( include Aws::Structure end - # An object representing a custom pattern for detecting sensitive data - # across the columns and rows of your structured data. + # Specifies a transform that uses custom code you provide to perform the + # data transformation. The output is a collection of DynamicFrames. 
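The CreateJobRequest above gains the `code_gen_configuration_nodes` parameter, and `CustomCode` is one of the node types it accepts. A minimal sketch, assuming a configured `Aws::Glue::Client`, of wiring a `custom_code` transform to a catalog source through that parameter; the node IDs, job name, role ARN, script location, and transform code are illustrative placeholders, not values taken from this patch:

    require "aws-sdk-glue"

    glue = Aws::Glue::Client.new(region: "us-east-1") # hypothetical region

    glue.create_job(
      name: "example-custom-code-job",                        # hypothetical job name
      role: "arn:aws:iam::123456789012:role/ExampleGlueRole", # hypothetical role
      command: { name: "glueetl", script_location: "s3://example-bucket/script.py" },
      glue_version: "3.0",
      code_gen_configuration_nodes: {
        "node-source" => {
          catalog_source: { name: "Source", database: "example_db", table: "example_table" },
        },
        "node-transform" => {
          custom_code: {
            name: "MyTransform",
            inputs: ["node-source"],   # upstream node ID
            code: "def MyTransform(glueContext, dfc):\n    return dfc",  # illustrative script body
            class_name: "MyTransform",
          },
        },
      },
    )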
+ # + # @note When making an API call, you may pass CustomCode + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } # # @!attribute [rw] name - # A name for the custom pattern that allows it to be retrieved or - # deleted later. This name must be unique per Amazon Web Services - # account. + # The name of the transform node. # @return [String] # - # @!attribute [rw] regex_string - # A regular expression string that is used for detecting sensitive - # data in a custom pattern. + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] code + # The custom code that is used to perform the data transformation. # @return [String] # - # @!attribute [rw] context_words - # A list of context words. If none of these context words are found - # within the vicinity of the regular expression the data will not be + # @!attribute [rw] class_name + # The name defined for the custom code node class. + # @return [String] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the custom code transform. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/CustomCode AWS API Documentation + # + class CustomCode < Struct.new( + :name, + :inputs, + :code, + :class_name, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + + # An object representing a custom pattern for detecting sensitive data + # across the columns and rows of your structured data. + # + # @!attribute [rw] name + # A name for the custom pattern that allows it to be retrieved or + # deleted later. This name must be unique per Amazon Web Services + # account. + # @return [String] + # + # @!attribute [rw] regex_string + # A regular expression string that is used for detecting sensitive + # data in a custom pattern. + # @return [String] + # + # @!attribute [rw] context_words + # A list of context words. If none of these context words are found + # within the vicinity of the regular expression the data will not be # detected as sensitive data. # # If no context words are passed only a regular expression is checked. @@ -5682,6 +7837,33 @@ class DatabaseInput < Struct.new( include Aws::Structure end + # A structure representing the datatype of the value. + # + # @note When making an API call, you may pass Datatype + # data as a hash: + # + # { + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # } + # + # @!attribute [rw] id + # The datatype of the value. + # @return [String] + # + # @!attribute [rw] label + # A label assigned to the datatype. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Datatype AWS API Documentation + # + class Datatype < Struct.new( + :id, + :label) + SENSITIVE = [] + include Aws::Structure + end + # Defines column statistics supported for timestamp data columns. # # @note When making an API call, you may pass DateColumnStatisticsData @@ -6946,6 +9128,181 @@ class DevEndpointCustomLibraries < Struct.new( include Aws::Structure end + # Specifies an Apache Kafka data store. 
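A sketch of how a `direct_kafka_source` node might look inside a streaming job's `code_gen_configuration_nodes` hash; the connection name, topic, and numeric values are placeholders:

    # One illustrative entry of a code_gen_configuration_nodes hash.
    kafka_source_node = {
      direct_kafka_source: {
        name: "KafkaSource",
        streaming_options: {
          connection_name: "example-kafka-connection", # hypothetical Glue connection
          topic_name: "example-topic",
          classification: "json",
          starting_offsets: "earliest",
        },
        window_size: 100,      # time spent processing each micro batch
        detect_schema: true,   # infer the schema from incoming records
        data_preview_options: { polling_time: 10, record_polling_limit: 100 },
      },
    }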
+ # + # @note When making an API call, you may pass DirectKafkaSource + # data as a hash: + # + # { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # } + # + # @!attribute [rw] name + # The name of the data store. + # @return [String] + # + # @!attribute [rw] streaming_options + # Specifies the streaming options. + # @return [Types::KafkaStreamingSourceOptions] + # + # @!attribute [rw] window_size + # The amount of time to spend processing each micro batch. + # @return [Integer] + # + # @!attribute [rw] detect_schema + # Whether to automatically determine the schema from the incoming + # data. + # @return [Boolean] + # + # @!attribute [rw] data_preview_options + # Specifies options related to data preview for viewing a sample of + # your data. + # @return [Types::StreamingDataPreviewOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DirectKafkaSource AWS API Documentation + # + class DirectKafkaSource < Struct.new( + :name, + :streaming_options, + :window_size, + :detect_schema, + :data_preview_options) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a direct Amazon Kinesis data source. + # + # @note When making an API call, you may pass DirectKinesisSource + # data as a hash: + # + # { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # } + # + # @!attribute [rw] name + # The name of the data source. + # @return [String] + # + # @!attribute [rw] window_size + # The amount of time to spend processing each micro batch. + # @return [Integer] + # + # @!attribute [rw] detect_schema + # Whether to automatically determine the schema from the incoming + # data. + # @return [Boolean] + # + # @!attribute [rw] streaming_options + # Additional options for the Kinesis streaming data source. + # @return [Types::KinesisStreamingSourceOptions] + # + # @!attribute [rw] data_preview_options + # Additional options for data preview. 
+ # @return [Types::StreamingDataPreviewOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DirectKinesisSource AWS API Documentation + # + class DirectKinesisSource < Struct.new( + :name, + :window_size, + :detect_schema, + :streaming_options, + :data_preview_options) + SENSITIVE = [] + include Aws::Structure + end + + # A policy that specifies update behavior for the crawler. + # + # @note When making an API call, you may pass DirectSchemaChangePolicy + # data as a hash: + # + # { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # } + # + # @!attribute [rw] enable_update_catalog + # Whether to use the specified update behavior when the crawler finds + # a changed schema. + # @return [Boolean] + # + # @!attribute [rw] update_behavior + # The update behavior when the crawler finds a changed schema. + # @return [String] + # + # @!attribute [rw] table + # Specifies the table in the database that the schema change policy + # applies to. + # @return [String] + # + # @!attribute [rw] database + # Specifies the database that the schema change policy applies to. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DirectSchemaChangePolicy AWS API Documentation + # + class DirectSchemaChangePolicy < Struct.new( + :enable_update_catalog, + :update_behavior, + :table, + :database) + SENSITIVE = [] + include Aws::Structure + end + # Defines column statistics supported for floating-point number data # columns. # @@ -6986,6 +9343,173 @@ class DoubleColumnStatisticsData < Struct.new( include Aws::Structure end + # Specifies a transform that removes rows of repeating data from a data + # set. + # + # @note When making an API call, you may pass DropDuplicates + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] columns + # The name of the columns to be merged or removed if repeating. + # @return [Array>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DropDuplicates AWS API Documentation + # + class DropDuplicates < Struct.new( + :name, + :inputs, + :columns) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform that chooses the data property keys that you + # want to drop. + # + # @note When making an API call, you may pass DropFields + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] paths + # A JSON path to a variable in the data structure. + # @return [Array>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DropFields AWS API Documentation + # + class DropFields < Struct.new( + :name, + :inputs, + :paths) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform that removes columns from the dataset if all + # values in the column are 'null'. 
By default, Glue Studio will + # recognize null objects, but some values such as empty strings, strings + # that are "null", -1 integers or other placeholders such as zeros, + # are not automatically recognized as nulls. + # + # @note When making an API call, you may pass DropNullFields + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] null_check_box_list + # A structure that represents whether certain values are recognized as + # null values for removal. + # @return [Types::NullCheckBoxList] + # + # @!attribute [rw] null_text_list + # A structure that specifies a list of NullValueField structures that + # represent a custom null value such as zero or other value being used + # as a null placeholder unique to the dataset. + # + # The `DropNullFields` transform removes custom null values only if + # both the value of the null placeholder and the datatype match the + # data. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DropNullFields AWS API Documentation + # + class DropNullFields < Struct.new( + :name, + :inputs, + :null_check_box_list, + :null_text_list) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a DynamoDB data source in the Glue Data Catalog. + # + # @note When making an API call, you may pass DynamoDBCatalogSource + # data as a hash: + # + # { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # } + # + # @!attribute [rw] name + # The name of the data source. + # @return [String] + # + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] + # + # @!attribute [rw] table + # The name of the table in the database to read from. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DynamoDBCatalogSource AWS API Documentation + # + class DynamoDBCatalogSource < Struct.new( + :name, + :database, + :table) + SENSITIVE = [] + include Aws::Structure + end + # Specifies an Amazon DynamoDB table to crawl. # # @note When making an API call, you may pass DynamoDBTarget @@ -7266,49 +9790,211 @@ class ExportLabelsTaskRunProperties < Struct.new( include Aws::Structure end - # The evaluation metrics for the find matches algorithm. The quality of - # your machine learning transform is measured by getting your transform - # to predict some matches and comparing the results to known matches - # from the same dataset. The quality metrics are based on a subset of - # your data, so they are not precise. + # Specifies a transform that locates records in the dataset that have + # missing values and adds a new field with a value determined by + # imputation. The input data set is used to train the machine learning + # model that determines what the missing value should be. 
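A sketch of a `fill_missing_values` node as it might appear in `code_gen_configuration_nodes`; the node ID and column paths are placeholders:

    # Illustrative entry of a code_gen_configuration_nodes hash.
    fill_node = {
      fill_missing_values: {
        name: "ImputePrice",
        inputs: ["node-source"],       # upstream node ID (placeholder)
        imputed_path: "price",         # path of the field whose missing values are imputed
        filled_path: "price_filled",   # path of the field that receives the imputed value
      },
    }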
# - # @!attribute [rw] area_under_pr_curve - # The area under the precision/recall curve (AUPRC) is a single number - # measuring the overall quality of the transform, that is independent - # of the choice made for precision vs. recall. Higher values indicate - # that you have a more attractive precision vs. recall tradeoff. + # @note When making an API call, you may pass FillMissingValues + # data as a hash: # - # For more information, see [Precision and recall][1] in Wikipedia. + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # } # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] # - # [1]: https://en.wikipedia.org/wiki/Precision_and_recall - # @return [Float] + # @!attribute [rw] imputed_path + # A JSON path to a variable in the data structure for the dataset that + # is imputed. + # @return [String] # - # @!attribute [rw] precision - # The precision metric indicates when often your transform is correct - # when it predicts a match. Specifically, it measures how well the - # transform finds true positives from the total true positives - # possible. + # @!attribute [rw] filled_path + # A JSON path to a variable in the data structure for the dataset that + # is filled. + # @return [String] # - # For more information, see [Precision and recall][1] in Wikipedia. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/FillMissingValues AWS API Documentation # + class FillMissingValues < Struct.new( + :name, + :inputs, + :imputed_path, + :filled_path) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform that splits a dataset into two, based on a + # filter condition. # + # @note When making an API call, you may pass Filter + # data as a hash: # - # [1]: https://en.wikipedia.org/wiki/Precision_and_recall - # @return [Float] + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # } # - # @!attribute [rw] recall - # The recall metric indicates that for an actual match, how often your - # transform predicts the match. Specifically, it measures how well the - # transform finds true positives from the total records in the source - # data. + # @!attribute [rw] name + # The name of the transform node. + # @return [String] # - # For more information, see [Precision and recall][1] in Wikipedia. + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] # + # @!attribute [rw] logical_operator + # The operator used to filter rows by comparing the key value to a + # specified value. + # @return [String] # + # @!attribute [rw] filters + # Specifies a filter expression. 
+ # @return [Array] # - # [1]: https://en.wikipedia.org/wiki/Precision_and_recall + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Filter AWS API Documentation + # + class Filter < Struct.new( + :name, + :inputs, + :logical_operator, + :filters) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a filter expression. + # + # @note When making an API call, you may pass FilterExpression + # data as a hash: + # + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # } + # + # @!attribute [rw] operation + # The type of operation to perform in the expression. + # @return [String] + # + # @!attribute [rw] negated + # Whether the expression is to be negated. + # @return [Boolean] + # + # @!attribute [rw] values + # A list of filter values. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/FilterExpression AWS API Documentation + # + class FilterExpression < Struct.new( + :operation, + :negated, + :values) + SENSITIVE = [] + include Aws::Structure + end + + # Represents a single entry in the list of values for a + # `FilterExpression`. + # + # @note When making an API call, you may pass FilterValue + # data as a hash: + # + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # } + # + # @!attribute [rw] type + # The type of filter value. + # @return [String] + # + # @!attribute [rw] value + # The value to be associated. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/FilterValue AWS API Documentation + # + class FilterValue < Struct.new( + :type, + :value) + SENSITIVE = [] + include Aws::Structure + end + + # The evaluation metrics for the find matches algorithm. The quality of + # your machine learning transform is measured by getting your transform + # to predict some matches and comparing the results to known matches + # from the same dataset. The quality metrics are based on a subset of + # your data, so they are not precise. + # + # @!attribute [rw] area_under_pr_curve + # The area under the precision/recall curve (AUPRC) is a single number + # measuring the overall quality of the transform, that is independent + # of the choice made for precision vs. recall. Higher values indicate + # that you have a more attractive precision vs. recall tradeoff. + # + # For more information, see [Precision and recall][1] in Wikipedia. + # + # + # + # [1]: https://en.wikipedia.org/wiki/Precision_and_recall + # @return [Float] + # + # @!attribute [rw] precision + # The precision metric indicates when often your transform is correct + # when it predicts a match. Specifically, it measures how well the + # transform finds true positives from the total true positives + # possible. + # + # For more information, see [Precision and recall][1] in Wikipedia. + # + # + # + # [1]: https://en.wikipedia.org/wiki/Precision_and_recall + # @return [Float] + # + # @!attribute [rw] recall + # The recall metric indicates that for an actual match, how often your + # transform predicts the match. Specifically, it measures how well the + # transform finds true positives from the total records in the source + # data. + # + # For more information, see [Precision and recall][1] in Wikipedia. 
+ # + # + # + # [1]: https://en.wikipedia.org/wiki/Precision_and_recall # @return [Float] # # @!attribute [rw] f1 @@ -11005,6 +13691,60 @@ class GluePolicy < Struct.new( include Aws::Structure end + # Specifies a user-defined schema when a schema cannot be determined by + # AWS Glue. + # + # @note When making an API call, you may pass GlueSchema + # data as a hash: + # + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # } + # + # @!attribute [rw] columns + # Specifies the column definitions that make up a Glue schema. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GlueSchema AWS API Documentation + # + class GlueSchema < Struct.new( + :columns) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a single column in a Glue schema definition. + # + # @note When making an API call, you may pass GlueStudioSchemaColumn + # data as a hash: + # + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # } + # + # @!attribute [rw] name + # The name of the column in the Glue Studio schema. + # @return [String] + # + # @!attribute [rw] type + # The hive type for this column in the Glue Studio schema. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GlueStudioSchemaColumn AWS API Documentation + # + class GlueStudioSchemaColumn < Struct.new( + :name, + :type) + SENSITIVE = [] + include Aws::Structure + end + # The database and table in the Glue Data Catalog that is used for input # or output data. # @@ -11045,3084 +13785,5553 @@ class GlueTable < Struct.new( include Aws::Structure end + # Specifies the data store in the governed Glue Data Catalog. + # + # @note When making an API call, you may pass GovernedCatalogSource + # data as a hash: + # + # { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # } + # + # @!attribute [rw] name + # The name of the data store. + # @return [String] + # + # @!attribute [rw] database + # The database to read from. + # @return [String] + # + # @!attribute [rw] table + # The database table to read from. + # @return [String] + # + # @!attribute [rw] partition_predicate + # Partitions satisfying this predicate are deleted. Files within the + # retention period in these partitions are not deleted. Set to `""` – + # empty by default. + # @return [String] + # + # @!attribute [rw] additional_options + # Specifies additional connection options. + # @return [Types::S3SourceAdditionalOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GovernedCatalogSource AWS API Documentation + # + class GovernedCatalogSource < Struct.new( + :name, + :database, + :table, + :partition_predicate, + :additional_options) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a data target that writes to Amazon S3 using the Glue Data + # Catalog. 
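A sketch of a `governed_catalog_target` node with a catalog schema-change policy; the database, table, and partition key names are placeholders:

    # Illustrative entry of a code_gen_configuration_nodes hash.
    governed_target_node = {
      governed_catalog_target: {
        name: "GovernedTarget",
        inputs: ["node-transform"],              # upstream node ID (placeholder)
        database: "example_governed_db",
        table: "example_table",
        partition_keys: [["year"], ["month"]],   # native partitioning keys
        schema_change_policy: {
          enable_update_catalog: true,
          update_behavior: "UPDATE_IN_DATABASE", # or "LOG"
        },
      },
    }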
+ # + # @note When making an API call, you may pass GovernedCatalogTarget + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # } + # + # @!attribute [rw] name + # The name of the data target. + # @return [String] + # + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] + # + # @!attribute [rw] partition_keys + # Specifies native partitioning using a sequence of keys. + # @return [Array>] + # + # @!attribute [rw] table + # The name of the table in the database to write to. + # @return [String] + # + # @!attribute [rw] database + # The name of the database to write to. + # @return [String] + # + # @!attribute [rw] schema_change_policy + # A policy that specifies update behavior for the governed catalog. + # @return [Types::CatalogSchemaChangePolicy] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GovernedCatalogTarget AWS API Documentation + # + class GovernedCatalogTarget < Struct.new( + :name, + :inputs, + :partition_keys, + :table, + :database, + :schema_change_policy) + SENSITIVE = [] + include Aws::Structure + end + # A classifier that uses `grok` patterns. # - # @!attribute [rw] name - # The name of the classifier. - # @return [String] + # @!attribute [rw] name + # The name of the classifier. + # @return [String] + # + # @!attribute [rw] classification + # An identifier of the data format that the classifier matches, such + # as Twitter, JSON, Omniture logs, and so on. + # @return [String] + # + # @!attribute [rw] creation_time + # The time that this classifier was registered. + # @return [Time] + # + # @!attribute [rw] last_updated + # The time that this classifier was last updated. + # @return [Time] + # + # @!attribute [rw] version + # The version of this classifier. + # @return [Integer] + # + # @!attribute [rw] grok_pattern + # The grok pattern applied to a data store by this classifier. For + # more information, see built-in patterns in [Writing Custom + # Classifiers][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html + # @return [String] + # + # @!attribute [rw] custom_patterns + # Optional custom grok patterns defined by this classifier. For more + # information, see custom patterns in [Writing Custom Classifiers][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GrokClassifier AWS API Documentation + # + class GrokClassifier < Struct.new( + :name, + :classification, + :creation_time, + :last_updated, + :version, + :grok_pattern, + :custom_patterns) + SENSITIVE = [] + include Aws::Structure + end + + # The same unique identifier was associated with two different records. + # + # @!attribute [rw] message + # A message describing the problem. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IdempotentParameterMismatchException AWS API Documentation + # + class IdempotentParameterMismatchException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IllegalBlueprintStateException AWS API Documentation + # + class IllegalBlueprintStateException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The session is in an invalid state to perform a requested operation. + # + # @!attribute [rw] message + # A message describing the problem. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IllegalSessionStateException AWS API Documentation + # + class IllegalSessionStateException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The workflow is in an invalid state to perform a requested operation. + # + # @!attribute [rw] message + # A message describing the problem. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IllegalWorkflowStateException AWS API Documentation + # + class IllegalWorkflowStateException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ImportCatalogToGlueRequest + # data as a hash: + # + # { + # catalog_id: "CatalogIdString", + # } + # + # @!attribute [rw] catalog_id + # The ID of the catalog to import. Currently, this should be the + # Amazon Web Services account ID. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlueRequest AWS API Documentation + # + class ImportCatalogToGlueRequest < Struct.new( + :catalog_id) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlueResponse AWS API Documentation + # + class ImportCatalogToGlueResponse < Aws::EmptyStructure; end + + # Specifies configuration properties for an importing labels task run. + # + # @!attribute [rw] input_s3_path + # The Amazon Simple Storage Service (Amazon S3) path from where you + # will import the labels. + # @return [String] + # + # @!attribute [rw] replace + # Indicates whether to overwrite your existing labels. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportLabelsTaskRunProperties AWS API Documentation + # + class ImportLabelsTaskRunProperties < Struct.new( + :input_s3_path, + :replace) + SENSITIVE = [] + include Aws::Structure + end + + # An internal service error occurred. + # + # @!attribute [rw] message + # A message describing the problem. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/InternalServiceException AWS API Documentation + # + class InternalServiceException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # The input provided was not valid. + # + # @!attribute [rw] message + # A message describing the problem. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/InvalidInputException AWS API Documentation + # + class InvalidInputException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # An error that indicates your data is in an invalid state. + # + # @!attribute [rw] message + # A message describing the problem. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/InvalidStateException AWS API Documentation + # + class InvalidStateException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # Additional connection options for the connector. + # + # @note When making an API call, you may pass JDBCConnectorOptions + # data as a hash: + # + # { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # } + # + # @!attribute [rw] filter_predicate + # Extra condition clause to filter data from source. For example: + # + # `BillingCity='Mountain View'` + # + # When using a query instead of a table name, you should validate that + # the query works with the specified `filterPredicate`. + # @return [String] + # + # @!attribute [rw] partition_column + # The name of an integer column that is used for partitioning. This + # option works only when it's included with `lowerBound`, + # `upperBound`, and `numPartitions`. This option works the same way as + # in the Spark SQL JDBC reader. + # @return [String] + # + # @!attribute [rw] lower_bound + # The minimum value of `partitionColumn` that is used to decide + # partition stride. + # @return [Integer] + # + # @!attribute [rw] upper_bound + # The maximum value of `partitionColumn` that is used to decide + # partition stride. + # @return [Integer] + # + # @!attribute [rw] num_partitions + # The number of partitions. This value, along with `lowerBound` + # (inclusive) and `upperBound` (exclusive), form partition strides for + # generated `WHERE` clause expressions that are used to split the + # `partitionColumn`. + # @return [Integer] + # + # @!attribute [rw] job_bookmark_keys + # The name of the job bookmark keys on which to sort. + # @return [Array] + # + # @!attribute [rw] job_bookmark_keys_sort_order + # Specifies an ascending or descending sort order. + # @return [String] + # + # @!attribute [rw] data_type_mapping + # Custom data type mapping that builds a mapping from a JDBC data type + # to an Glue data type. For example, the option + # `"dataTypeMapping":\{"FLOAT":"STRING"\}` maps data fields of JDBC + # type `FLOAT` into the Java `String` type by calling the + # `ResultSet.getString()` method of the driver, and uses it to build + # the Glue record. The `ResultSet` object is implemented by each + # driver, so the behavior is specific to the driver you use. Refer to + # the documentation for your JDBC driver to understand how the driver + # performs the conversions. + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JDBCConnectorOptions AWS API Documentation + # + class JDBCConnectorOptions < Struct.new( + :filter_predicate, + :partition_column, + :lower_bound, + :upper_bound, + :num_partitions, + :job_bookmark_keys, + :job_bookmark_keys_sort_order, + :data_type_mapping) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a connector to a JDBC data source. 
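A sketch of a `jdbc_connector_source` node that uses the partitioned-read options documented above for `JDBCConnectorOptions` (`partition_column`, `lower_bound`, `upper_bound`, `num_partitions`); the connection, connector, and table names are placeholders:

    # Illustrative entry of a code_gen_configuration_nodes hash.
    jdbc_source_node = {
      jdbc_connector_source: {
        name: "JdbcSource",
        connection_name: "example-jdbc-connection", # hypothetical Glue connection
        connector_name: "example-connector",        # hypothetical connector
        connection_type: "marketplace.jdbc",
        connection_table: "orders",
        additional_options: {
          partition_column: "order_id",  # integer column used to split the read
          lower_bound: 1,
          upper_bound: 1_000_000,
          num_partitions: 10,
        },
      },
    }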
+ # + # @note When making an API call, you may pass JDBCConnectorSource + # data as a hash: + # + # { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # }, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the data source. + # @return [String] + # + # @!attribute [rw] connection_name + # The name of the connection that is associated with the connector. + # @return [String] + # + # @!attribute [rw] connector_name + # The name of a connector that assists with accessing the data store + # in Glue Studio. + # @return [String] + # + # @!attribute [rw] connection_type + # The type of connection, such as marketplace.jdbc or custom.jdbc, + # designating a connection to a JDBC data store. + # @return [String] + # + # @!attribute [rw] additional_options + # Additional connection options for the connector. + # @return [Types::JDBCConnectorOptions] + # + # @!attribute [rw] connection_table + # The name of the table in the data source. + # @return [String] + # + # @!attribute [rw] query + # The table or SQL query to get the data from. You can specify either + # `ConnectionTable` or `query`, but not both. + # @return [String] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the custom JDBC source. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JDBCConnectorSource AWS API Documentation + # + class JDBCConnectorSource < Struct.new( + :name, + :connection_name, + :connector_name, + :connection_type, + :additional_options, + :connection_table, + :query, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a data target that writes to Amazon S3 in Apache Parquet + # columnar storage. + # + # @note When making an API call, you may pass JDBCConnectorTarget + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the data target. + # @return [String] + # + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] + # + # @!attribute [rw] connection_name + # The name of the connection that is associated with the connector. 
+ # @return [String] + # + # @!attribute [rw] connection_table + # The name of the table in the data target. + # @return [String] + # + # @!attribute [rw] connector_name + # The name of a connector that will be used. + # @return [String] + # + # @!attribute [rw] connection_type + # The type of connection, such as marketplace.jdbc or custom.jdbc, + # designating a connection to a JDBC data target. + # @return [String] + # + # @!attribute [rw] additional_options + # Additional connection options for the connector. + # @return [Hash] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the JDBC target. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JDBCConnectorTarget AWS API Documentation + # + class JDBCConnectorTarget < Struct.new( + :name, + :inputs, + :connection_name, + :connection_table, + :connector_name, + :connection_type, + :additional_options, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a JDBC data store to crawl. + # + # @note When making an API call, you may pass JdbcTarget + # data as a hash: + # + # { + # connection_name: "ConnectionName", + # path: "Path", + # exclusions: ["Path"], + # } + # + # @!attribute [rw] connection_name + # The name of the connection to use to connect to the JDBC target. + # @return [String] + # + # @!attribute [rw] path + # The path of the JDBC target. + # @return [String] + # + # @!attribute [rw] exclusions + # A list of glob patterns used to exclude from the crawl. For more + # information, see [Catalog Tables with a Crawler][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JdbcTarget AWS API Documentation + # + class JdbcTarget < Struct.new( + :connection_name, + :path, + :exclusions) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a job definition. + # + # @!attribute [rw] name + # The name you assign to this job definition. + # @return [String] + # + # @!attribute [rw] description + # A description of the job. + # @return [String] + # + # @!attribute [rw] log_uri + # This field is reserved for future use. + # @return [String] + # + # @!attribute [rw] role + # The name or Amazon Resource Name (ARN) of the IAM role associated + # with this job. + # @return [String] + # + # @!attribute [rw] created_on + # The time and date that this job definition was created. + # @return [Time] + # + # @!attribute [rw] last_modified_on + # The last point in time when this job definition was modified. + # @return [Time] + # + # @!attribute [rw] execution_property + # An `ExecutionProperty` specifying the maximum number of concurrent + # runs allowed for this job. + # @return [Types::ExecutionProperty] + # + # @!attribute [rw] command + # The `JobCommand` that runs this job. + # @return [Types::JobCommand] + # + # @!attribute [rw] default_arguments + # The default arguments for this job, specified as name-value pairs. + # + # You can specify arguments here that your own job-execution script + # consumes, as well as arguments that Glue itself consumes. + # + # For information about how to specify and consume your own Job + # arguments, see the [Calling Glue APIs in Python][1] topic in the + # developer guide. + # + # For information about the key-value pairs that Glue consumes to set + # up your job, see the [Special Parameters Used by Glue][2] topic in + # the developer guide. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html + # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + # @return [Hash] + # + # @!attribute [rw] non_overridable_arguments + # Non-overridable arguments for this job, specified as name-value + # pairs. + # @return [Hash] + # + # @!attribute [rw] connections + # The connections used for this job. + # @return [Types::ConnectionsList] + # + # @!attribute [rw] max_retries + # The maximum number of times to retry this job after a JobRun fails. + # @return [Integer] + # + # @!attribute [rw] allocated_capacity + # This field is deprecated. Use `MaxCapacity` instead. + # + # The number of Glue data processing units (DPUs) allocated to runs of + # this job. You can allocate from 2 to 100 DPUs; the default is 10. A + # DPU is a relative measure of processing power that consists of 4 + # vCPUs of compute capacity and 16 GB of memory. For more information, + # see the [Glue pricing page][1]. + # + # + # + # + # + # [1]: https://aws.amazon.com/glue/pricing/ + # @return [Integer] + # + # @!attribute [rw] timeout + # The job timeout in minutes. This is the maximum time that a job run + # can consume resources before it is terminated and enters `TIMEOUT` + # status. The default is 2,880 minutes (48 hours). + # @return [Integer] + # + # @!attribute [rw] max_capacity + # For Glue version 1.0 or earlier jobs, using the standard worker + # type, the number of Glue data processing units (DPUs) that can be + # allocated when this job runs. A DPU is a relative measure of + # processing power that consists of 4 vCPUs of compute capacity and 16 + # GB of memory. For more information, see the [Glue pricing page][1]. + # + # Do not set `Max Capacity` if using `WorkerType` and + # `NumberOfWorkers`. + # + # The value that can be allocated for `MaxCapacity` depends on whether + # you are running a Python shell job, an Apache Spark ETL job, or an + # Apache Spark streaming ETL job: + # + # * When you specify a Python shell job + # (`JobCommand.Name`="pythonshell"), you can allocate either + # 0.0625 or 1 DPU. The default is 0.0625 DPU. + # + # * When you specify an Apache Spark ETL job + # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job + # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to + # 100 DPUs. The default is 10 DPUs. This job type cannot have a + # fractional DPU allocation. + # + # For Glue version 2.0 jobs, you cannot instead specify a `Maximum + # capacity`. Instead, you should specify a `Worker type` and the + # `Number of workers`. + # + # + # + # [1]: https://aws.amazon.com/glue/pricing/ + # @return [Float] + # + # @!attribute [rw] worker_type + # The type of predefined worker that is allocated when a job runs. + # Accepts a value of Standard, G.1X, or G.2X. + # + # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB + # of memory and a 50GB disk, and 2 executors per worker. + # + # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 + # GB of memory, 64 GB disk), and provides 1 executor per worker. We + # recommend this worker type for memory-intensive jobs. + # + # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 + # GB of memory, 128 GB disk), and provides 1 executor per worker. We + # recommend this worker type for memory-intensive jobs. 
+ # @return [String] + # + # @!attribute [rw] number_of_workers + # The number of workers of a defined `workerType` that are allocated + # when a job runs. + # + # The maximum number of workers you can define are 299 for `G.1X`, and + # 149 for `G.2X`. + # @return [Integer] + # + # @!attribute [rw] security_configuration + # The name of the `SecurityConfiguration` structure to be used with + # this job. + # @return [String] + # + # @!attribute [rw] notification_property + # Specifies configuration properties of a job notification. + # @return [Types::NotificationProperty] + # + # @!attribute [rw] glue_version + # Glue version determines the versions of Apache Spark and Python that + # Glue supports. The Python version indicates the version supported + # for jobs of type Spark. + # + # For more information about the available Glue versions and + # corresponding Spark and Python versions, see [Glue version][1] in + # the developer guide. + # + # Jobs that are created without specifying a Glue version default to + # Glue 0.9. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html + # @return [String] + # + # @!attribute [rw] code_gen_configuration_nodes + # The representation of a directed acyclic graph on which both the + # Glue Studio visual component and Glue Studio code generation is + # based. + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Job AWS API Documentation + # + class Job < Struct.new( + :name, + :description, + :log_uri, + :role, + :created_on, + :last_modified_on, + :execution_property, + :command, + :default_arguments, + :non_overridable_arguments, + :connections, + :max_retries, + :allocated_capacity, + :timeout, + :max_capacity, + :worker_type, + :number_of_workers, + :security_configuration, + :notification_property, + :glue_version, + :code_gen_configuration_nodes) + SENSITIVE = [:code_gen_configuration_nodes] + include Aws::Structure + end + + # Defines a point that a job can resume processing. + # + # @!attribute [rw] job_name + # The name of the job in question. + # @return [String] + # + # @!attribute [rw] version + # The version of the job. + # @return [Integer] + # + # @!attribute [rw] run + # The run ID number. + # @return [Integer] + # + # @!attribute [rw] attempt + # The attempt ID number. + # @return [Integer] + # + # @!attribute [rw] previous_run_id + # The unique run identifier associated with the previous job run. + # @return [String] + # + # @!attribute [rw] run_id + # The run ID number. + # @return [String] + # + # @!attribute [rw] job_bookmark + # The bookmark itself. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobBookmarkEntry AWS API Documentation + # + class JobBookmarkEntry < Struct.new( + :job_name, + :version, + :run, + :attempt, + :previous_run_id, + :run_id, + :job_bookmark) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies how job bookmark data should be encrypted. + # + # @note When making an API call, you may pass JobBookmarksEncryption + # data as a hash: + # + # { + # job_bookmarks_encryption_mode: "DISABLED", # accepts DISABLED, CSE-KMS + # kms_key_arn: "KmsKeyArn", + # } + # + # @!attribute [rw] job_bookmarks_encryption_mode + # The encryption mode to use for job bookmarks data. + # @return [String] + # + # @!attribute [rw] kms_key_arn + # The Amazon Resource Name (ARN) of the KMS key to be used to encrypt + # the data. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobBookmarksEncryption AWS API Documentation + # + class JobBookmarksEncryption < Struct.new( + :job_bookmarks_encryption_mode, + :kms_key_arn) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies code that runs when a job is run. + # + # @note When making an API call, you may pass JobCommand + # data as a hash: + # + # { + # name: "GenericString", + # script_location: "ScriptLocationString", + # python_version: "PythonVersionString", + # } + # + # @!attribute [rw] name + # The name of the job command. For an Apache Spark ETL job, this must + # be `glueetl`. For a Python shell job, it must be `pythonshell`. For + # an Apache Spark streaming ETL job, this must be `gluestreaming`. + # @return [String] + # + # @!attribute [rw] script_location + # Specifies the Amazon Simple Storage Service (Amazon S3) path to a + # script that runs a job. + # @return [String] + # + # @!attribute [rw] python_version + # The Python version being used to run a Python shell job. Allowed + # values are 2 or 3. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobCommand AWS API Documentation + # + class JobCommand < Struct.new( + :name, + :script_location, + :python_version) + SENSITIVE = [] + include Aws::Structure + end + + # The details of a Job node present in the workflow. + # + # @!attribute [rw] job_runs + # The information for the job runs represented by the job node. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobNodeDetails AWS API Documentation + # + class JobNodeDetails < Struct.new( + :job_runs) + SENSITIVE = [] + include Aws::Structure + end + + # Contains information about a job run. + # + # @!attribute [rw] id + # The ID of this job run. + # @return [String] + # + # @!attribute [rw] attempt + # The number of the attempt to run this job. + # @return [Integer] + # + # @!attribute [rw] previous_run_id + # The ID of the previous run of this job. For example, the `JobRunId` + # specified in the `StartJobRun` action. + # @return [String] + # + # @!attribute [rw] trigger_name + # The name of the trigger that started this job run. + # @return [String] + # + # @!attribute [rw] job_name + # The name of the job definition being used in this run. + # @return [String] + # + # @!attribute [rw] started_on + # The date and time at which this job run was started. + # @return [Time] + # + # @!attribute [rw] last_modified_on + # The last time that this job run was modified. + # @return [Time] + # + # @!attribute [rw] completed_on + # The date and time that this job run completed. + # @return [Time] + # + # @!attribute [rw] job_run_state + # The current state of the job run. For more information about the + # statuses of jobs that have terminated abnormally, see [Glue Job Run + # Statuses][1]. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html + # @return [String] + # + # @!attribute [rw] arguments + # The job arguments associated with this run. For this job run, they + # replace the default arguments set in the job definition itself. + # + # You can specify arguments here that your own job-execution script + # consumes, as well as arguments that Glue itself consumes. + # + # For information about how to specify and consume your own job + # arguments, see the [Calling Glue APIs in Python][1] topic in the + # developer guide. 
+ # + # For information about the key-value pairs that Glue consumes to set + # up your job, see the [Special Parameters Used by Glue][2] topic in + # the developer guide. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html + # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + # @return [Hash] + # + # @!attribute [rw] error_message + # An error message associated with this job run. + # @return [String] + # + # @!attribute [rw] predecessor_runs + # A list of predecessors to this job run. + # @return [Array] + # + # @!attribute [rw] allocated_capacity + # This field is deprecated. Use `MaxCapacity` instead. + # + # The number of Glue data processing units (DPUs) allocated to this + # JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A + # DPU is a relative measure of processing power that consists of 4 + # vCPUs of compute capacity and 16 GB of memory. For more information, + # see the [Glue pricing page][1]. + # + # + # + # [1]: https://aws.amazon.com/glue/pricing/ + # @return [Integer] + # + # @!attribute [rw] execution_time + # The amount of time (in seconds) that the job run consumed resources. + # @return [Integer] + # + # @!attribute [rw] timeout + # The `JobRun` timeout in minutes. This is the maximum time that a job + # run can consume resources before it is terminated and enters + # `TIMEOUT` status. The default is 2,880 minutes (48 hours). This + # overrides the timeout value set in the parent job. + # @return [Integer] + # + # @!attribute [rw] max_capacity + # The number of Glue data processing units (DPUs) that can be + # allocated when this job runs. A DPU is a relative measure of + # processing power that consists of 4 vCPUs of compute capacity and 16 + # GB of memory. For more information, see the [Glue pricing page][1]. + # + # Do not set `Max Capacity` if using `WorkerType` and + # `NumberOfWorkers`. + # + # The value that can be allocated for `MaxCapacity` depends on whether + # you are running a Python shell job or an Apache Spark ETL job: + # + # * When you specify a Python shell job + # (`JobCommand.Name`="pythonshell"), you can allocate either + # 0.0625 or 1 DPU. The default is 0.0625 DPU. + # + # * When you specify an Apache Spark ETL job + # (`JobCommand.Name`="glueetl"), you can allocate from 2 to 100 + # DPUs. The default is 10 DPUs. This job type cannot have a + # fractional DPU allocation. + # + # + # + # [1]: https://aws.amazon.com/glue/pricing/ + # @return [Float] + # + # @!attribute [rw] worker_type + # The type of predefined worker that is allocated when a job runs. + # Accepts a value of Standard, G.1X, or G.2X. + # + # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB + # of memory and a 50GB disk, and 2 executors per worker. + # + # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of + # memory and a 64GB disk, and 1 executor per worker. + # + # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of + # memory and a 128GB disk, and 1 executor per worker. + # @return [String] + # + # @!attribute [rw] number_of_workers + # The number of workers of a defined `workerType` that are allocated + # when a job runs. + # + # The maximum number of workers you can define are 299 for `G.1X`, and + # 149 for `G.2X`. + # @return [Integer] + # + # @!attribute [rw] security_configuration + # The name of the `SecurityConfiguration` structure to be used with + # this job run. 
+ # @return [String] + # + # @!attribute [rw] log_group_name + # The name of the log group for secure logging that can be server-side + # encrypted in Amazon CloudWatch using KMS. This name can be + # `/aws-glue/jobs/`, in which case the default encryption is `NONE`. + # If you add a role name and `SecurityConfiguration` name (in other + # words, + # `/aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/`), then + # that security configuration is used to encrypt the log group. + # @return [String] + # + # @!attribute [rw] notification_property + # Specifies configuration properties of a job run notification. + # @return [Types::NotificationProperty] + # + # @!attribute [rw] glue_version + # Glue version determines the versions of Apache Spark and Python that + # Glue supports. The Python version indicates the version supported + # for jobs of type Spark. + # + # For more information about the available Glue versions and + # corresponding Spark and Python versions, see [Glue version][1] in + # the developer guide. + # + # Jobs that are created without specifying a Glue version default to + # Glue 0.9. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html + # @return [String] + # + # @!attribute [rw] dpu_seconds + # This field populates only when an Auto Scaling job run completes, + # and represents the total time each executor ran during the lifecycle + # of a job run in seconds, multiplied by a DPU factor (1 for `G.1X` + # and 2 for `G.2X` workers). This value may be different than the + # `executionEngineRuntime` * `MaxCapacity` as in the case of Auto + # Scaling jobs, as the number of executors running at a given time may + # be less than the `MaxCapacity`. Therefore, it is possible that the + # value of `DPUSeconds` is less than `executionEngineRuntime` * + # `MaxCapacity`. + # @return [Float] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobRun AWS API Documentation + # + class JobRun < Struct.new( + :id, + :attempt, + :previous_run_id, + :trigger_name, + :job_name, + :started_on, + :last_modified_on, + :completed_on, + :job_run_state, + :arguments, + :error_message, + :predecessor_runs, + :allocated_capacity, + :execution_time, + :timeout, + :max_capacity, + :worker_type, + :number_of_workers, + :security_configuration, + :log_group_name, + :notification_property, + :glue_version, + :dpu_seconds) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies information used to update an existing job definition. The + # previous job definition is completely overwritten by this information. 
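+ #
+ # As a rough usage sketch, a minimal update that re-specifies the
+ # required `role` and `command` fields could be passed to
+ # `Aws::Glue::Client#update_job` as shown below; the job name, IAM role,
+ # bucket, and script path are placeholders:
+ #
+ #     glue = Aws::Glue::Client.new
+ #     glue.update_job(
+ #       job_name: "example-etl-job",                               # placeholder job name
+ #       job_update: {
+ #         role: "ExampleGlueServiceRole",                          # placeholder IAM role
+ #         command: {
+ #           name: "glueetl",
+ #           script_location: "s3://example-bucket/scripts/job.py", # placeholder script path
+ #           python_version: "3",
+ #         },
+ #         glue_version: "3.0",
+ #         worker_type: "G.1X",
+ #         number_of_workers: 10,
+ #       },
+ #     )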
+ # + # @note When making an API call, you may pass JobUpdate + # data as a hash: + # + # { + # description: "DescriptionString", + # log_uri: "UriString", + # role: "RoleString", + # execution_property: { + # max_concurrent_runs: 1, + # }, + # command: { + # name: "GenericString", + # script_location: "ScriptLocationString", + # python_version: "PythonVersionString", + # }, + # default_arguments: { + # "GenericString" => "GenericString", + # }, + # non_overridable_arguments: { + # "GenericString" => "GenericString", + # }, + # connections: { + # connections: ["GenericString"], + # }, + # max_retries: 1, + # allocated_capacity: 1, + # timeout: 1, + # max_capacity: 1.0, + # worker_type: "Standard", # accepts Standard, G.1X, G.2X + # number_of_workers: 1, + # security_configuration: "NameString", + # notification_property: { + # notify_delay_after: 1, + # }, + # glue_version: "GlueVersionString", + # code_gen_configuration_nodes: { + # "NodeId" => { + # athena_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # jdbc_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # }, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # }, + # s3_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: 
"EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # s3_csv_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_json_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_parquet_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # relational_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # dynamo_db_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # jdbc_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_target: 
{ + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, + # }, + # s3_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # s3_glue_parquet_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # s3_direct_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # apply_mapping: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # }, + # select_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # drop_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # rename_field: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source_path: ["EnclosedInStringProperty"], # required + # target_path: 
["EnclosedInStringProperty"], # required + # }, + # spigot: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, + # }, + # join: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], + # }, + # split_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # select_from_collection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # }, + # fill_missing_values: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # }, + # filter: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # }, + # custom_code: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_sql: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # direct_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # direct_kafka_source: { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # 
subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kafka_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # drop_null_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # }, + # merge: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # union: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # }, + # pii_detection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # required + # output_column_name: "EnclosedInStringProperty", + # 
sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # }, + # aggregate: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop + # }, + # ], + # }, + # drop_duplicates: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # }, + # governed_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # governed_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # microsoft_sql_server_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # microsoft_sql_server_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # }, + # }, + # } + # + # @!attribute [rw] description + # Description of the job being defined. + # @return [String] + # + # @!attribute [rw] log_uri + # This field is reserved for future use. + # @return [String] + # + # @!attribute [rw] role + # The name or Amazon Resource Name (ARN) of the IAM role associated + # with this job (required). + # @return [String] + # + # @!attribute [rw] execution_property + # An `ExecutionProperty` specifying the maximum number of concurrent + # runs allowed for this job. 
+ # @return [Types::ExecutionProperty] + # + # @!attribute [rw] command + # The `JobCommand` that runs this job (required). + # @return [Types::JobCommand] + # + # @!attribute [rw] default_arguments + # The default arguments for this job. + # + # You can specify arguments here that your own job-execution script + # consumes, as well as arguments that Glue itself consumes. + # + # For information about how to specify and consume your own Job + # arguments, see the [Calling Glue APIs in Python][1] topic in the + # developer guide. + # + # For information about the key-value pairs that Glue consumes to set + # up your job, see the [Special Parameters Used by Glue][2] topic in + # the developer guide. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html + # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + # @return [Hash] + # + # @!attribute [rw] non_overridable_arguments + # Non-overridable arguments for this job, specified as name-value + # pairs. + # @return [Hash] + # + # @!attribute [rw] connections + # The connections used for this job. + # @return [Types::ConnectionsList] + # + # @!attribute [rw] max_retries + # The maximum number of times to retry this job if it fails. + # @return [Integer] # - # @!attribute [rw] classification - # An identifier of the data format that the classifier matches, such - # as Twitter, JSON, Omniture logs, and so on. - # @return [String] + # @!attribute [rw] allocated_capacity + # This field is deprecated. Use `MaxCapacity` instead. # - # @!attribute [rw] creation_time - # The time that this classifier was registered. - # @return [Time] + # The number of Glue data processing units (DPUs) to allocate to this + # job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU + # is a relative measure of processing power that consists of 4 vCPUs + # of compute capacity and 16 GB of memory. For more information, see + # the [Glue pricing page][1]. # - # @!attribute [rw] last_updated - # The time that this classifier was last updated. - # @return [Time] # - # @!attribute [rw] version - # The version of this classifier. + # + # [1]: https://aws.amazon.com/glue/pricing/ # @return [Integer] # - # @!attribute [rw] grok_pattern - # The grok pattern applied to a data store by this classifier. For - # more information, see built-in patterns in [Writing Custom - # Classifiers][1]. + # @!attribute [rw] timeout + # The job timeout in minutes. This is the maximum time that a job run + # can consume resources before it is terminated and enters `TIMEOUT` + # status. The default is 2,880 minutes (48 hours). + # @return [Integer] # + # @!attribute [rw] max_capacity + # For Glue version 1.0 or earlier jobs, using the standard worker + # type, the number of Glue data processing units (DPUs) that can be + # allocated when this job runs. A DPU is a relative measure of + # processing power that consists of 4 vCPUs of compute capacity and 16 + # GB of memory. For more information, see the [Glue pricing page][1]. # + # Do not set `Max Capacity` if using `WorkerType` and + # `NumberOfWorkers`. # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html - # @return [String] + # The value that can be allocated for `MaxCapacity` depends on whether + # you are running a Python shell job or an Apache Spark ETL job: # - # @!attribute [rw] custom_patterns - # Optional custom grok patterns defined by this classifier. 
For more - # information, see custom patterns in [Writing Custom Classifiers][1]. + # * When you specify a Python shell job + # (`JobCommand.Name`="pythonshell"), you can allocate either + # 0.0625 or 1 DPU. The default is 0.0625 DPU. # + # * When you specify an Apache Spark ETL job + # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job + # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to + # 100 DPUs. The default is 10 DPUs. This job type cannot have a + # fractional DPU allocation. # + # For Glue version 2.0 jobs, you cannot instead specify a `Maximum + # capacity`. Instead, you should specify a `Worker type` and the + # `Number of workers`. # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html - # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GrokClassifier AWS API Documentation # - class GrokClassifier < Struct.new( - :name, - :classification, - :creation_time, - :last_updated, - :version, - :grok_pattern, - :custom_patterns) - SENSITIVE = [] - include Aws::Structure - end - - # The same unique identifier was associated with two different records. + # [1]: https://aws.amazon.com/glue/pricing/ + # @return [Float] # - # @!attribute [rw] message - # A message describing the problem. - # @return [String] + # @!attribute [rw] worker_type + # The type of predefined worker that is allocated when a job runs. + # Accepts a value of Standard, G.1X, or G.2X. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IdempotentParameterMismatchException AWS API Documentation + # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB + # of memory and a 50GB disk, and 2 executors per worker. # - class IdempotentParameterMismatchException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] message + # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 + # GB of memory, 64 GB disk), and provides 1 executor per worker. We + # recommend this worker type for memory-intensive jobs. + # + # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 + # GB of memory, 128 GB disk), and provides 1 executor per worker. We + # recommend this worker type for memory-intensive jobs. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IllegalBlueprintStateException AWS API Documentation + # @!attribute [rw] number_of_workers + # The number of workers of a defined `workerType` that are allocated + # when a job runs. # - class IllegalBlueprintStateException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # The session is in an invalid state to perform a requested operation. + # The maximum number of workers you can define are 299 for `G.1X`, and + # 149 for `G.2X`. + # @return [Integer] # - # @!attribute [rw] message - # A message describing the problem. + # @!attribute [rw] security_configuration + # The name of the `SecurityConfiguration` structure to be used with + # this job. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IllegalSessionStateException AWS API Documentation + # @!attribute [rw] notification_property + # Specifies the configuration properties of a job notification. + # @return [Types::NotificationProperty] # - class IllegalSessionStateException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # The workflow is in an invalid state to perform a requested operation. 
+ # @!attribute [rw] glue_version + # Glue version determines the versions of Apache Spark and Python that + # Glue supports. The Python version indicates the version supported + # for jobs of type Spark. # - # @!attribute [rw] message - # A message describing the problem. + # For more information about the available Glue versions and + # corresponding Spark and Python versions, see [Glue version][1] in + # the developer guide. + # + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/IllegalWorkflowStateException AWS API Documentation + # @!attribute [rw] code_gen_configuration_nodes + # The representation of a directed acyclic graph on which both the + # Glue Studio visual component and Glue Studio code generation is + # based. + # @return [Hash] # - class IllegalWorkflowStateException < Struct.new( - :message) - SENSITIVE = [] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobUpdate AWS API Documentation + # + class JobUpdate < Struct.new( + :description, + :log_uri, + :role, + :execution_property, + :command, + :default_arguments, + :non_overridable_arguments, + :connections, + :max_retries, + :allocated_capacity, + :timeout, + :max_capacity, + :worker_type, + :number_of_workers, + :security_configuration, + :notification_property, + :glue_version, + :code_gen_configuration_nodes) + SENSITIVE = [:code_gen_configuration_nodes] include Aws::Structure end - # @note When making an API call, you may pass ImportCatalogToGlueRequest + # Specifies a transform that joins two datasets into one dataset using a + # comparison phrase on the specified data property keys. You can use + # inner, outer, left, right, left semi, and left anti joins. + # + # @note When making an API call, you may pass Join # data as a hash: # # { - # catalog_id: "CatalogIdString", + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], # } # - # @!attribute [rw] catalog_id - # The ID of the catalog to import. Currently, this should be the - # Amazon Web Services account ID. + # @!attribute [rw] name + # The name of the transform node. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlueRequest AWS API Documentation + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] # - class ImportCatalogToGlueRequest < Struct.new( - :catalog_id) + # @!attribute [rw] join_type + # Specifies the type of join to be performed on the datasets. + # @return [String] + # + # @!attribute [rw] columns + # A list of the two columns to be joined. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Join AWS API Documentation + # + class Join < Struct.new( + :name, + :inputs, + :join_type, + :columns) SENSITIVE = [] include Aws::Structure end - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportCatalogToGlueResponse AWS API Documentation + # Specifies a column to be joined. # - class ImportCatalogToGlueResponse < Aws::EmptyStructure; end - - # Specifies configuration properties for an importing labels task run. 
+ # @note When making an API call, you may pass JoinColumn + # data as a hash: # - # @!attribute [rw] input_s3_path - # The Amazon Simple Storage Service (Amazon S3) path from where you - # will import the labels. + # { + # from: "EnclosedInStringProperty", # required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # } + # + # @!attribute [rw] from + # The column to be joined. # @return [String] # - # @!attribute [rw] replace - # Indicates whether to overwrite your existing labels. - # @return [Boolean] + # @!attribute [rw] keys + # The key of the column to be joined. + # @return [Array>] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ImportLabelsTaskRunProperties AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JoinColumn AWS API Documentation # - class ImportLabelsTaskRunProperties < Struct.new( - :input_s3_path, - :replace) + class JoinColumn < Struct.new( + :from, + :keys) SENSITIVE = [] include Aws::Structure end - # An internal service error occurred. + # A classifier for `JSON` content. # - # @!attribute [rw] message - # A message describing the problem. + # @!attribute [rw] name + # The name of the classifier. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/InternalServiceException AWS API Documentation + # @!attribute [rw] creation_time + # The time that this classifier was registered. + # @return [Time] # - class InternalServiceException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # The input provided was not valid. + # @!attribute [rw] last_updated + # The time that this classifier was last updated. + # @return [Time] # - # @!attribute [rw] message - # A message describing the problem. - # @return [String] + # @!attribute [rw] version + # The version of this classifier. + # @return [Integer] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/InvalidInputException AWS API Documentation + # @!attribute [rw] json_path + # A `JsonPath` string defining the JSON data for the classifier to + # classify. Glue supports a subset of JsonPath, as described in + # [Writing JsonPath Custom Classifiers][1]. # - class InvalidInputException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # An error that indicates your data is in an invalid state. # - # @!attribute [rw] message - # A message describing the problem. + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/InvalidStateException AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JsonClassifier AWS API Documentation # - class InvalidStateException < Struct.new( - :message) + class JsonClassifier < Struct.new( + :name, + :creation_time, + :last_updated, + :version, + :json_path) SENSITIVE = [] include Aws::Structure end - # Specifies a JDBC data store to crawl. + # Additional options for streaming. 
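+ #
+ # As a rough sketch, the streaming options for a Kafka source that
+ # subscribes to a single topic over an existing Glue connection might
+ # look like the following hash; the connection and topic names are
+ # placeholders:
+ #
+ #     {
+ #       connection_name: "example-kafka-connection",  # placeholder connection name
+ #       topic_name: "example-topic",                  # one of topic_name, assign, or subscribe_pattern is required
+ #       security_protocol: "SSL",
+ #       starting_offsets: "earliest",
+ #       poll_timeout_ms: 512,
+ #       num_retries: 3,
+ #     }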
# - # @note When making an API call, you may pass JdbcTarget + # @note When making an API call, you may pass KafkaStreamingSourceOptions # data as a hash: # # { - # connection_name: "ConnectionName", - # path: "Path", - # exclusions: ["Path"], + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, # } # - # @!attribute [rw] connection_name - # The name of the connection to use to connect to the JDBC target. + # @!attribute [rw] bootstrap_servers + # A list of bootstrap server URLs, for example, as + # `b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094`. This + # option must be specified in the API call or defined in the table + # metadata in the Data Catalog. # @return [String] # - # @!attribute [rw] path - # The path of the JDBC target. + # @!attribute [rw] security_protocol + # The protocol used to communicate with brokers. The possible values + # are `"SSL"` or `"PLAINTEXT"`. # @return [String] # - # @!attribute [rw] exclusions - # A list of glob patterns used to exclude from the crawl. For more - # information, see [Catalog Tables with a Crawler][1]. - # - # - # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html - # @return [Array] + # @!attribute [rw] connection_name + # The name of the connection. + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JdbcTarget AWS API Documentation + # @!attribute [rw] topic_name + # The topic name as specified in Apache Kafka. You must specify at + # least one of `"topicName"`, `"assign"` or `"subscribePattern"`. + # @return [String] # - class JdbcTarget < Struct.new( - :connection_name, - :path, - :exclusions) - SENSITIVE = [] - include Aws::Structure - end - - # Specifies a job definition. + # @!attribute [rw] assign + # The specific `TopicPartitions` to consume. You must specify at least + # one of `"topicName"`, `"assign"` or `"subscribePattern"`. + # @return [String] # - # @!attribute [rw] name - # The name you assign to this job definition. + # @!attribute [rw] subscribe_pattern + # A Java regex string that identifies the topic list to subscribe to. + # You must specify at least one of `"topicName"`, `"assign"` or + # `"subscribePattern"`. # @return [String] # - # @!attribute [rw] description - # A description of the job. + # @!attribute [rw] classification + # An optional classification. # @return [String] # - # @!attribute [rw] log_uri - # This field is reserved for future use. + # @!attribute [rw] delimiter + # Specifies the delimiter character. # @return [String] # - # @!attribute [rw] role - # The name or Amazon Resource Name (ARN) of the IAM role associated - # with this job. + # @!attribute [rw] starting_offsets + # The starting position in the Kafka topic to read data from. The + # possible values are `"earliest"` or `"latest"`. The default value is + # `"latest"`. # @return [String] # - # @!attribute [rw] created_on - # The time and date that this job definition was created. 
- # @return [Time] + # @!attribute [rw] ending_offsets + # The end point when a batch query is ended. Possible values are + # either `"latest"` or a JSON string that specifies an ending offset + # for each `TopicPartition`. + # @return [String] # - # @!attribute [rw] last_modified_on - # The last point in time when this job definition was modified. - # @return [Time] + # @!attribute [rw] poll_timeout_ms + # The timeout in milliseconds to poll data from Kafka in Spark job + # executors. The default value is `512`. + # @return [Integer] # - # @!attribute [rw] execution_property - # An `ExecutionProperty` specifying the maximum number of concurrent - # runs allowed for this job. - # @return [Types::ExecutionProperty] + # @!attribute [rw] num_retries + # The number of times to retry before failing to fetch Kafka offsets. + # The default value is `3`. + # @return [Integer] # - # @!attribute [rw] command - # The `JobCommand` that runs this job. - # @return [Types::JobCommand] + # @!attribute [rw] retry_interval_ms + # The time in milliseconds to wait before retrying to fetch Kafka + # offsets. The default value is `10`. + # @return [Integer] # - # @!attribute [rw] default_arguments - # The default arguments for this job, specified as name-value pairs. + # @!attribute [rw] max_offsets_per_trigger + # The rate limit on the maximum number of offsets that are processed + # per trigger interval. The specified total number of offsets is + # proportionally split across `topicPartitions` of different volumes. + # The default value is null, which means that the consumer reads all + # offsets until the known latest offset. + # @return [Integer] # - # You can specify arguments here that your own job-execution script - # consumes, as well as arguments that Glue itself consumes. + # @!attribute [rw] min_partitions + # The desired minimum number of partitions to read from Kafka. The + # default value is null, which means that the number of spark + # partitions is equal to the number of Kafka partitions. + # @return [Integer] # - # For information about how to specify and consume your own Job - # arguments, see the [Calling Glue APIs in Python][1] topic in the - # developer guide. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KafkaStreamingSourceOptions AWS API Documentation # - # For information about the key-value pairs that Glue consumes to set - # up your job, see the [Special Parameters Used by Glue][2] topic in - # the developer guide. + class KafkaStreamingSourceOptions < Struct.new( + :bootstrap_servers, + :security_protocol, + :connection_name, + :topic_name, + :assign, + :subscribe_pattern, + :classification, + :delimiter, + :starting_offsets, + :ending_offsets, + :poll_timeout_ms, + :num_retries, + :retry_interval_ms, + :max_offsets_per_trigger, + :min_partitions) + SENSITIVE = [] + include Aws::Structure + end + + # A partition key pair consisting of a name and a type. # + # @!attribute [rw] name + # The name of a partition key. + # @return [String] # + # @!attribute [rw] type + # The type of a partition key. + # @return [String] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html - # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html - # @return [Hash] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KeySchemaElement AWS API Documentation # - # @!attribute [rw] non_overridable_arguments - # Non-overridable arguments for this job, specified as name-value - # pairs. 
- # @return [Hash] + class KeySchemaElement < Struct.new( + :name, + :type) + SENSITIVE = [] + include Aws::Structure + end + + # Additional options for the Amazon Kinesis streaming data source. # - # @!attribute [rw] connections - # The connections used for this job. - # @return [Types::ConnectionsList] + # @note When making an API call, you may pass KinesisStreamingSourceOptions + # data as a hash: # - # @!attribute [rw] max_retries - # The maximum number of times to retry this job after a JobRun fails. - # @return [Integer] + # { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # } # - # @!attribute [rw] allocated_capacity - # This field is deprecated. Use `MaxCapacity` instead. + # @!attribute [rw] endpoint_url + # The URL of the Kinesis endpoint. + # @return [String] # - # The number of Glue data processing units (DPUs) allocated to runs of - # this job. You can allocate from 2 to 100 DPUs; the default is 10. A - # DPU is a relative measure of processing power that consists of 4 - # vCPUs of compute capacity and 16 GB of memory. For more information, - # see the [Glue pricing page][1]. + # @!attribute [rw] stream_name + # The name of the Kinesis data stream. + # @return [String] # + # @!attribute [rw] classification + # An optional classification. + # @return [String] # + # @!attribute [rw] delimiter + # Specifies the delimiter character. + # @return [String] # + # @!attribute [rw] starting_position + # The starting position in the Kinesis data stream to read data from. + # The possible values are `"latest"`, `"trim_horizon"`, or + # `"earliest"`. The default value is `"latest"`. + # @return [String] # + # @!attribute [rw] max_fetch_time_in_ms + # The maximum time spent in the job executor to fetch a record from + # the Kinesis data stream per shard, specified in milliseconds (ms). + # The default value is `1000`. + # @return [Integer] # - # [1]: https://aws.amazon.com/glue/pricing/ + # @!attribute [rw] max_fetch_records_per_shard + # The maximum number of records to fetch per shard in the Kinesis data + # stream. The default value is `100000`. # @return [Integer] # - # @!attribute [rw] timeout - # The job timeout in minutes. This is the maximum time that a job run - # can consume resources before it is terminated and enters `TIMEOUT` - # status. The default is 2,880 minutes (48 hours). + # @!attribute [rw] max_record_per_read + # The maximum number of records to fetch from the Kinesis data stream + # in each getRecords operation. The default value is `10000`. # @return [Integer] # - # @!attribute [rw] max_capacity - # For Glue version 1.0 or earlier jobs, using the standard worker - # type, the number of Glue data processing units (DPUs) that can be - # allocated when this job runs. A DPU is a relative measure of - # processing power that consists of 4 vCPUs of compute capacity and 16 - # GB of memory. 
For more information, see the [Glue pricing page][1]. + # @!attribute [rw] add_idle_time_between_reads + # Adds a time delay between two consecutive getRecords operations. The + # default value is `"False"`. This option is only configurable for + # Glue version 2.0 and above. + # @return [Boolean] # - # Do not set `Max Capacity` if using `WorkerType` and - # `NumberOfWorkers`. + # @!attribute [rw] idle_time_between_reads_in_ms + # The minimum time delay between two consecutive getRecords + # operations, specified in ms. The default value is `1000`. This + # option is only configurable for Glue version 2.0 and above. + # @return [Integer] # - # The value that can be allocated for `MaxCapacity` depends on whether - # you are running a Python shell job, an Apache Spark ETL job, or an - # Apache Spark streaming ETL job: + # @!attribute [rw] describe_shard_interval + # The minimum time interval between two ListShards API calls for your + # script to consider resharding. The default value is `1s`. + # @return [Integer] # - # * When you specify a Python shell job - # (`JobCommand.Name`="pythonshell"), you can allocate either - # 0.0625 or 1 DPU. The default is 0.0625 DPU. + # @!attribute [rw] num_retries + # The maximum number of retries for Kinesis Data Streams API requests. + # The default value is `3`. + # @return [Integer] # - # * When you specify an Apache Spark ETL job - # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job - # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to - # 100 DPUs. The default is 10 DPUs. This job type cannot have a - # fractional DPU allocation. + # @!attribute [rw] retry_interval_ms + # The cool-off time period (specified in ms) before retrying the + # Kinesis Data Streams API call. The default value is `1000`. + # @return [Integer] # - # For Glue version 2.0 jobs, you cannot instead specify a `Maximum - # capacity`. Instead, you should specify a `Worker type` and the - # `Number of workers`. + # @!attribute [rw] max_retry_interval_ms + # The maximum cool-off time period (specified in ms) between two + # retries of a Kinesis Data Streams API call. The default value is + # `10000`. + # @return [Integer] # + # @!attribute [rw] avoid_empty_batches + # Avoids creating an empty microbatch job by checking for unread data + # in the Kinesis data stream before the batch is started. The default + # value is `"False"`. + # @return [Boolean] # + # @!attribute [rw] stream_arn + # The Amazon Resource Name (ARN) of the Kinesis data stream. + # @return [String] # - # [1]: https://aws.amazon.com/glue/pricing/ - # @return [Float] + # @!attribute [rw] role_arn + # The Amazon Resource Name (ARN) of the role to assume using AWS + # Security Token Service (AWS STS). This role must have permissions + # for describe or read record operations for the Kinesis data stream. + # You must use this parameter when accessing a data stream in a + # different account. Used in conjunction with `"awsSTSSessionName"`. + # @return [String] # - # @!attribute [rw] worker_type - # The type of predefined worker that is allocated when a job runs. - # Accepts a value of Standard, G.1X, or G.2X. + # @!attribute [rw] role_session_name + # An identifier for the session assuming the role using AWS STS. You + # must use this parameter when accessing a data stream in a different + # account. Used in conjunction with `"awsSTSRoleARN"`. + # @return [String] # - # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB - # of memory and a 50GB disk, and 2 executors per worker. 
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KinesisStreamingSourceOptions AWS API Documentation # - # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 - # GB of memory, 64 GB disk), and provides 1 executor per worker. We - # recommend this worker type for memory-intensive jobs. + class KinesisStreamingSourceOptions < Struct.new( + :endpoint_url, + :stream_name, + :classification, + :delimiter, + :starting_position, + :max_fetch_time_in_ms, + :max_fetch_records_per_shard, + :max_record_per_read, + :add_idle_time_between_reads, + :idle_time_between_reads_in_ms, + :describe_shard_interval, + :num_retries, + :retry_interval_ms, + :max_retry_interval_ms, + :avoid_empty_batches, + :stream_arn, + :role_arn, + :role_session_name) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies configuration properties for a labeling set generation task + # run. # - # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 - # GB of memory, 128 GB disk), and provides 1 executor per worker. We - # recommend this worker type for memory-intensive jobs. + # @!attribute [rw] output_s3_path + # The Amazon Simple Storage Service (Amazon S3) path where you will + # generate the labeling set. # @return [String] # - # @!attribute [rw] number_of_workers - # The number of workers of a defined `workerType` that are allocated - # when a job runs. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LabelingSetGenerationTaskRunProperties AWS API Documentation # - # The maximum number of workers you can define are 299 for `G.1X`, and - # 149 for `G.2X`. - # @return [Integer] + class LabelingSetGenerationTaskRunProperties < Struct.new( + :output_s3_path) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies AWS Lake Formation configuration settings for the crawler. # - # @!attribute [rw] security_configuration - # The name of the `SecurityConfiguration` structure to be used with - # this job. + # @note When making an API call, you may pass LakeFormationConfiguration + # data as a hash: + # + # { + # use_lake_formation_credentials: false, + # account_id: "AccountId", + # } + # + # @!attribute [rw] use_lake_formation_credentials + # Specifies whether to use AWS Lake Formation credentials for the + # crawler instead of the IAM role credentials. + # @return [Boolean] + # + # @!attribute [rw] account_id + # Required for cross account crawls. For same account crawls as the + # target data, this can be left as null. # @return [String] # - # @!attribute [rw] notification_property - # Specifies configuration properties of a job notification. - # @return [Types::NotificationProperty] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LakeFormationConfiguration AWS API Documentation # - # @!attribute [rw] glue_version - # Glue version determines the versions of Apache Spark and Python that - # Glue supports. The Python version indicates the version supported - # for jobs of type Spark. + class LakeFormationConfiguration < Struct.new( + :use_lake_formation_credentials, + :account_id) + SENSITIVE = [] + include Aws::Structure + end + + # When there are multiple versions of a blueprint and the latest version + # has some errors, this attribute indicates the last successful + # blueprint definition that is available with the service. # - # For more information about the available Glue versions and - # corresponding Spark and Python versions, see [Glue version][1] in - # the developer guide. 
+ # @!attribute [rw] description + # The description of the blueprint. + # @return [String] # - # Jobs that are created without specifying a Glue version default to - # Glue 0.9. + # @!attribute [rw] last_modified_on + # The date and time the blueprint was last modified. + # @return [Time] # + # @!attribute [rw] parameter_spec + # A JSON string specifying the parameters for the blueprint. + # @return [String] # + # @!attribute [rw] blueprint_location + # Specifies a path in Amazon S3 where the blueprint is published by + # the Glue developer. + # @return [String] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html + # @!attribute [rw] blueprint_service_location + # Specifies a path in Amazon S3 where the blueprint is copied when you + # create or update the blueprint. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Job AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LastActiveDefinition AWS API Documentation # - class Job < Struct.new( - :name, + class LastActiveDefinition < Struct.new( :description, - :log_uri, - :role, - :created_on, :last_modified_on, - :execution_property, - :command, - :default_arguments, - :non_overridable_arguments, - :connections, - :max_retries, - :allocated_capacity, - :timeout, - :max_capacity, - :worker_type, - :number_of_workers, - :security_configuration, - :notification_property, - :glue_version) + :parameter_spec, + :blueprint_location, + :blueprint_service_location) SENSITIVE = [] include Aws::Structure end - # Defines a point that a job can resume processing. + # Status and error information about the most recent crawl. # - # @!attribute [rw] job_name - # The name of the job in question. + # @!attribute [rw] status + # Status of the last crawl. # @return [String] # - # @!attribute [rw] version - # The version of the job. - # @return [Integer] - # - # @!attribute [rw] run - # The run ID number. - # @return [Integer] - # - # @!attribute [rw] attempt - # The attempt ID number. - # @return [Integer] + # @!attribute [rw] error_message + # If an error occurred, the error information about the last crawl. + # @return [String] # - # @!attribute [rw] previous_run_id - # The unique run identifier associated with the previous job run. + # @!attribute [rw] log_group + # The log group for the last crawl. # @return [String] # - # @!attribute [rw] run_id - # The run ID number. + # @!attribute [rw] log_stream + # The log stream for the last crawl. # @return [String] # - # @!attribute [rw] job_bookmark - # The bookmark itself. + # @!attribute [rw] message_prefix + # The prefix for a message about this crawl. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobBookmarkEntry AWS API Documentation + # @!attribute [rw] start_time + # The time at which the crawl started. + # @return [Time] # - class JobBookmarkEntry < Struct.new( - :job_name, - :version, - :run, - :attempt, - :previous_run_id, - :run_id, - :job_bookmark) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LastCrawlInfo AWS API Documentation + # + class LastCrawlInfo < Struct.new( + :status, + :error_message, + :log_group, + :log_stream, + :message_prefix, + :start_time) SENSITIVE = [] include Aws::Structure end - # Specifies how job bookmark data should be encrypted. + # Specifies data lineage configuration settings for the crawler. 
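+ #
+ # As a short sketch, enabling lineage collection for a crawler amounts
+ # to passing a hash such as the following as the `lineage_configuration`
+ # member when the crawler is created or updated:
+ #
+ #     { crawler_lineage_settings: "ENABLE" }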
# - # @note When making an API call, you may pass JobBookmarksEncryption + # @note When making an API call, you may pass LineageConfiguration # data as a hash: # # { - # job_bookmarks_encryption_mode: "DISABLED", # accepts DISABLED, CSE-KMS - # kms_key_arn: "KmsKeyArn", + # crawler_lineage_settings: "ENABLE", # accepts ENABLE, DISABLE # } # - # @!attribute [rw] job_bookmarks_encryption_mode - # The encryption mode to use for job bookmarks data. - # @return [String] + # @!attribute [rw] crawler_lineage_settings + # Specifies whether data lineage is enabled for the crawler. Valid + # values are: # - # @!attribute [rw] kms_key_arn - # The Amazon Resource Name (ARN) of the KMS key to be used to encrypt - # the data. + # * ENABLE: enables data lineage for the crawler + # + # * DISABLE: disables data lineage for the crawler # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobBookmarksEncryption AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LineageConfiguration AWS API Documentation # - class JobBookmarksEncryption < Struct.new( - :job_bookmarks_encryption_mode, - :kms_key_arn) + class LineageConfiguration < Struct.new( + :crawler_lineage_settings) SENSITIVE = [] include Aws::Structure end - # Specifies code that runs when a job is run. - # - # @note When making an API call, you may pass JobCommand + # @note When making an API call, you may pass ListBlueprintsRequest # data as a hash: # # { - # name: "GenericString", - # script_location: "ScriptLocationString", - # python_version: "PythonVersionString", + # next_token: "GenericString", + # max_results: 1, + # tags: { + # "TagKey" => "TagValue", + # }, # } # - # @!attribute [rw] name - # The name of the job command. For an Apache Spark ETL job, this must - # be `glueetl`. For a Python shell job, it must be `pythonshell`. For - # an Apache Spark streaming ETL job, this must be `gluestreaming`. + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. # @return [String] # - # @!attribute [rw] script_location - # Specifies the Amazon Simple Storage Service (Amazon S3) path to a - # script that runs a job. - # @return [String] + # @!attribute [rw] max_results + # The maximum size of a list to return. + # @return [Integer] # - # @!attribute [rw] python_version - # The Python version being used to run a Python shell job. Allowed - # values are 2 or 3. - # @return [String] + # @!attribute [rw] tags + # Filters the list by an Amazon Web Services resource tag. + # @return [Hash] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobCommand AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListBlueprintsRequest AWS API Documentation # - class JobCommand < Struct.new( - :name, - :script_location, - :python_version) + class ListBlueprintsRequest < Struct.new( + :next_token, + :max_results, + :tags) SENSITIVE = [] include Aws::Structure end - # The details of a Job node present in the workflow. + # @!attribute [rw] blueprints + # List of names of blueprints in the account. + # @return [Array] # - # @!attribute [rw] job_runs - # The information for the job runs represented by the job node. - # @return [Array] + # @!attribute [rw] next_token + # A continuation token, if not all blueprint names have been returned. 
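# Illustrative sketch only (not part of the generated SDK code): enabling data
# lineage on an existing crawler with the LineageConfiguration shape shown
# above. The crawler name is a placeholder.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
glue.update_crawler(
  name: "example-crawler",
  lineage_configuration: { crawler_lineage_settings: "ENABLE" }
)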
+ # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobNodeDetails AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListBlueprintsResponse AWS API Documentation # - class JobNodeDetails < Struct.new( - :job_runs) + class ListBlueprintsResponse < Struct.new( + :blueprints, + :next_token) SENSITIVE = [] include Aws::Structure end - # Contains information about a job run. + # @note When making an API call, you may pass ListCrawlersRequest + # data as a hash: # - # @!attribute [rw] id - # The ID of this job run. - # @return [String] + # { + # max_results: 1, + # next_token: "Token", + # tags: { + # "TagKey" => "TagValue", + # }, + # } # - # @!attribute [rw] attempt - # The number of the attempt to run this job. + # @!attribute [rw] max_results + # The maximum size of a list to return. # @return [Integer] # - # @!attribute [rw] previous_run_id - # The ID of the previous run of this job. For example, the `JobRunId` - # specified in the `StartJobRun` action. - # @return [String] - # - # @!attribute [rw] trigger_name - # The name of the trigger that started this job run. - # @return [String] - # - # @!attribute [rw] job_name - # The name of the job definition being used in this run. - # @return [String] - # - # @!attribute [rw] started_on - # The date and time at which this job run was started. - # @return [Time] - # - # @!attribute [rw] last_modified_on - # The last time that this job run was modified. - # @return [Time] - # - # @!attribute [rw] completed_on - # The date and time that this job run completed. - # @return [Time] - # - # @!attribute [rw] job_run_state - # The current state of the job run. For more information about the - # statuses of jobs that have terminated abnormally, see [Glue Job Run - # Statuses][1]. - # - # - # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. # @return [String] # - # @!attribute [rw] arguments - # The job arguments associated with this run. For this job run, they - # replace the default arguments set in the job definition itself. + # @!attribute [rw] tags + # Specifies to return only these tagged resources. + # @return [Hash] # - # You can specify arguments here that your own job-execution script - # consumes, as well as arguments that Glue itself consumes. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlersRequest AWS API Documentation # - # For information about how to specify and consume your own job - # arguments, see the [Calling Glue APIs in Python][1] topic in the - # developer guide. + class ListCrawlersRequest < Struct.new( + :max_results, + :next_token, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] crawler_names + # The names of all crawlers in the account, or the crawlers with the + # specified tags. + # @return [Array] # - # For information about the key-value pairs that Glue consumes to set - # up your job, see the [Special Parameters Used by Glue][2] topic in - # the developer guide. + # @!attribute [rw] next_token + # A continuation token, if the returned list does not contain the last + # metric available. 
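# Illustrative sketch only (not part of the generated SDK code): listing just
# the crawlers that carry a given tag, using the ListCrawlersRequest fields
# documented above. The tag key and value are placeholders.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
resp = glue.list_crawlers(max_results: 50, tags: { "team" => "analytics" })
resp.crawler_names.each { |name| puts name }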
+ # @return [String] # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlersResponse AWS API Documentation # + class ListCrawlersResponse < Struct.new( + :crawler_names, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListCustomEntityTypesRequest + # data as a hash: # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html - # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html - # @return [Hash] + # { + # next_token: "PaginationToken", + # max_results: 1, + # } # - # @!attribute [rw] error_message - # An error message associated with this job run. + # @!attribute [rw] next_token + # A paginated token to offset the results. # @return [String] # - # @!attribute [rw] predecessor_runs - # A list of predecessors to this job run. - # @return [Array] + # @!attribute [rw] max_results + # The maximum number of results to return. + # @return [Integer] # - # @!attribute [rw] allocated_capacity - # This field is deprecated. Use `MaxCapacity` instead. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCustomEntityTypesRequest AWS API Documentation # - # The number of Glue data processing units (DPUs) allocated to this - # JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A - # DPU is a relative measure of processing power that consists of 4 - # vCPUs of compute capacity and 16 GB of memory. For more information, - # see the [Glue pricing page][1]. + class ListCustomEntityTypesRequest < Struct.new( + :next_token, + :max_results) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] custom_entity_types + # A list of `CustomEntityType` objects representing custom patterns. + # @return [Array] # + # @!attribute [rw] next_token + # A pagination token, if more results are available. + # @return [String] # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCustomEntityTypesResponse AWS API Documentation # - # [1]: https://aws.amazon.com/glue/pricing/ - # @return [Integer] + class ListCustomEntityTypesResponse < Struct.new( + :custom_entity_types, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListDevEndpointsRequest + # data as a hash: # - # @!attribute [rw] execution_time - # The amount of time (in seconds) that the job run consumed resources. - # @return [Integer] + # { + # next_token: "GenericString", + # max_results: 1, + # tags: { + # "TagKey" => "TagValue", + # }, + # } # - # @!attribute [rw] timeout - # The `JobRun` timeout in minutes. This is the maximum time that a job - # run can consume resources before it is terminated and enters - # `TIMEOUT` status. The default is 2,880 minutes (48 hours). This - # overrides the timeout value set in the parent job. + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum size of a list to return. # @return [Integer] # - # @!attribute [rw] max_capacity - # The number of Glue data processing units (DPUs) that can be - # allocated when this job runs. A DPU is a relative measure of - # processing power that consists of 4 vCPUs of compute capacity and 16 - # GB of memory. For more information, see the [Glue pricing page][1]. + # @!attribute [rw] tags + # Specifies to return only these tagged resources. 
+ # @return [Hash] # - # Do not set `Max Capacity` if using `WorkerType` and - # `NumberOfWorkers`. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpointsRequest AWS API Documentation # - # The value that can be allocated for `MaxCapacity` depends on whether - # you are running a Python shell job or an Apache Spark ETL job: + class ListDevEndpointsRequest < Struct.new( + :next_token, + :max_results, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] dev_endpoint_names + # The names of all the `DevEndpoint`s in the account, or the + # `DevEndpoint`s with the specified tags. + # @return [Array] # - # * When you specify a Python shell job - # (`JobCommand.Name`="pythonshell"), you can allocate either - # 0.0625 or 1 DPU. The default is 0.0625 DPU. + # @!attribute [rw] next_token + # A continuation token, if the returned list does not contain the last + # metric available. + # @return [String] # - # * When you specify an Apache Spark ETL job - # (`JobCommand.Name`="glueetl"), you can allocate from 2 to 100 - # DPUs. The default is 10 DPUs. This job type cannot have a - # fractional DPU allocation. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpointsResponse AWS API Documentation + # + class ListDevEndpointsResponse < Struct.new( + :dev_endpoint_names, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListJobsRequest + # data as a hash: # + # { + # next_token: "GenericString", + # max_results: 1, + # tags: { + # "TagKey" => "TagValue", + # }, + # } # + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. + # @return [String] # - # [1]: https://aws.amazon.com/glue/pricing/ - # @return [Float] + # @!attribute [rw] max_results + # The maximum size of a list to return. + # @return [Integer] # - # @!attribute [rw] worker_type - # The type of predefined worker that is allocated when a job runs. - # Accepts a value of Standard, G.1X, or G.2X. + # @!attribute [rw] tags + # Specifies to return only these tagged resources. + # @return [Hash] # - # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB - # of memory and a 50GB disk, and 2 executors per worker. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobsRequest AWS API Documentation # - # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of - # memory and a 64GB disk, and 1 executor per worker. + class ListJobsRequest < Struct.new( + :next_token, + :max_results, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] job_names + # The names of all jobs in the account, or the jobs with the specified + # tags. + # @return [Array] # - # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of - # memory and a 128GB disk, and 1 executor per worker. + # @!attribute [rw] next_token + # A continuation token, if the returned list does not contain the last + # metric available. # @return [String] # - # @!attribute [rw] number_of_workers - # The number of workers of a defined `workerType` that are allocated - # when a job runs. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobsResponse AWS API Documentation # - # The maximum number of workers you can define are 299 for `G.1X`, and - # 149 for `G.2X`. 
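# Illustrative sketch only (not part of the generated SDK code): paging
# through ListJobs with the next_token and max_results fields documented
# above until every job name has been collected.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
job_names = []
next_token = nil
loop do
  params = { max_results: 100 }
  params[:next_token] = next_token if next_token    # only send a token on continuation calls
  resp = glue.list_jobs(params)
  job_names.concat(resp.job_names)
  next_token = resp.next_token
  break if next_token.nil? || next_token.empty?
end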
- # @return [Integer] + class ListJobsResponse < Struct.new( + :job_names, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListMLTransformsRequest + # data as a hash: # - # @!attribute [rw] security_configuration - # The name of the `SecurityConfiguration` structure to be used with - # this job run. - # @return [String] + # { + # next_token: "PaginationToken", + # max_results: 1, + # filter: { + # name: "NameString", + # transform_type: "FIND_MATCHES", # accepts FIND_MATCHES + # status: "NOT_READY", # accepts NOT_READY, READY, DELETING + # glue_version: "GlueVersionString", + # created_before: Time.now, + # created_after: Time.now, + # last_modified_before: Time.now, + # last_modified_after: Time.now, + # schema: [ + # { + # name: "ColumnNameString", + # data_type: "ColumnTypeString", + # }, + # ], + # }, + # sort: { + # column: "NAME", # required, accepts NAME, TRANSFORM_TYPE, STATUS, CREATED, LAST_MODIFIED + # sort_direction: "DESCENDING", # required, accepts DESCENDING, ASCENDING + # }, + # tags: { + # "TagKey" => "TagValue", + # }, + # } # - # @!attribute [rw] log_group_name - # The name of the log group for secure logging that can be server-side - # encrypted in Amazon CloudWatch using KMS. This name can be - # `/aws-glue/jobs/`, in which case the default encryption is `NONE`. - # If you add a role name and `SecurityConfiguration` name (in other - # words, - # `/aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/`), then - # that security configuration is used to encrypt the log group. + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. # @return [String] # - # @!attribute [rw] notification_property - # Specifies configuration properties of a job run notification. - # @return [Types::NotificationProperty] + # @!attribute [rw] max_results + # The maximum size of a list to return. + # @return [Integer] # - # @!attribute [rw] glue_version - # Glue version determines the versions of Apache Spark and Python that - # Glue supports. The Python version indicates the version supported - # for jobs of type Spark. + # @!attribute [rw] filter + # A `TransformFilterCriteria` used to filter the machine learning + # transforms. + # @return [Types::TransformFilterCriteria] # - # For more information about the available Glue versions and - # corresponding Spark and Python versions, see [Glue version][1] in - # the developer guide. + # @!attribute [rw] sort + # A `TransformSortCriteria` used to sort the machine learning + # transforms. + # @return [Types::TransformSortCriteria] # - # Jobs that are created without specifying a Glue version default to - # Glue 0.9. + # @!attribute [rw] tags + # Specifies to return only these tagged resources. + # @return [Hash] # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListMLTransformsRequest AWS API Documentation # + class ListMLTransformsRequest < Struct.new( + :next_token, + :max_results, + :filter, + :sort, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] transform_ids + # The identifiers of all the machine learning transforms in the + # account, or the machine learning transforms with the specified tags. + # @return [Array] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html + # @!attribute [rw] next_token + # A continuation token, if the returned list does not contain the last + # metric available. 
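# Illustrative sketch only (not part of the generated SDK code): using the
# filter and sort hashes documented above to fetch the most recently created
# FIND_MATCHES transforms that are ready to run.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
resp = glue.list_ml_transforms(
  max_results: 25,
  filter: { transform_type: "FIND_MATCHES", status: "READY" },
  sort: { column: "CREATED", sort_direction: "DESCENDING" }
)
resp.transform_ids.each { |id| puts id }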
# @return [String] # - # @!attribute [rw] dpu_seconds - # This field populates only when an Auto Scaling job run completes, - # and represents the total time each executor ran during the lifecycle - # of a job run in seconds, multiplied by a DPU factor (1 for `G.1X` - # and 2 for `G.2X` workers). This value may be different than the - # `executionEngineRuntime` * `MaxCapacity` as in the case of Auto - # Scaling jobs, as the number of executors running at a given time may - # be less than the `MaxCapacity`. Therefore, it is possible that the - # value of `DPUSeconds` is less than `executionEngineRuntime` * - # `MaxCapacity`. - # @return [Float] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobRun AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListMLTransformsResponse AWS API Documentation # - class JobRun < Struct.new( - :id, - :attempt, - :previous_run_id, - :trigger_name, - :job_name, - :started_on, - :last_modified_on, - :completed_on, - :job_run_state, - :arguments, - :error_message, - :predecessor_runs, - :allocated_capacity, - :execution_time, - :timeout, - :max_capacity, - :worker_type, - :number_of_workers, - :security_configuration, - :log_group_name, - :notification_property, - :glue_version, - :dpu_seconds) + class ListMLTransformsResponse < Struct.new( + :transform_ids, + :next_token) SENSITIVE = [] include Aws::Structure end - # Specifies information used to update an existing job definition. The - # previous job definition is completely overwritten by this information. - # - # @note When making an API call, you may pass JobUpdate + # @note When making an API call, you may pass ListRegistriesInput # data as a hash: # # { - # description: "DescriptionString", - # log_uri: "UriString", - # role: "RoleString", - # execution_property: { - # max_concurrent_runs: 1, - # }, - # command: { - # name: "GenericString", - # script_location: "ScriptLocationString", - # python_version: "PythonVersionString", - # }, - # default_arguments: { - # "GenericString" => "GenericString", - # }, - # non_overridable_arguments: { - # "GenericString" => "GenericString", - # }, - # connections: { - # connections: ["GenericString"], - # }, - # max_retries: 1, - # allocated_capacity: 1, - # timeout: 1, - # max_capacity: 1.0, - # worker_type: "Standard", # accepts Standard, G.1X, G.2X - # number_of_workers: 1, - # security_configuration: "NameString", - # notification_property: { - # notify_delay_after: 1, - # }, - # glue_version: "GlueVersionString", + # max_results: 1, + # next_token: "SchemaRegistryTokenString", # } # - # @!attribute [rw] description - # Description of the job being defined. - # @return [String] - # - # @!attribute [rw] log_uri - # This field is reserved for future use. - # @return [String] + # @!attribute [rw] max_results + # Maximum number of results required per page. If the value is not + # supplied, this will be defaulted to 25 per page. + # @return [Integer] # - # @!attribute [rw] role - # The name or Amazon Resource Name (ARN) of the IAM role associated - # with this job (required). + # @!attribute [rw] next_token + # A continuation token, if this is a continuation call. # @return [String] # - # @!attribute [rw] execution_property - # An `ExecutionProperty` specifying the maximum number of concurrent - # runs allowed for this job. - # @return [Types::ExecutionProperty] - # - # @!attribute [rw] command - # The `JobCommand` that runs this job (required). 
- # @return [Types::JobCommand] - # - # @!attribute [rw] default_arguments - # The default arguments for this job. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListRegistriesInput AWS API Documentation # - # You can specify arguments here that your own job-execution script - # consumes, as well as arguments that Glue itself consumes. + class ListRegistriesInput < Struct.new( + :max_results, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] registries + # An array of `RegistryDetailedListItem` objects containing minimal + # details of each registry. + # @return [Array] # - # For information about how to specify and consume your own Job - # arguments, see the [Calling Glue APIs in Python][1] topic in the - # developer guide. + # @!attribute [rw] next_token + # A continuation token for paginating the returned list of tokens, + # returned if the current segment of the list is not the last. + # @return [String] # - # For information about the key-value pairs that Glue consumes to set - # up your job, see the [Special Parameters Used by Glue][2] topic in - # the developer guide. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListRegistriesResponse AWS API Documentation # + class ListRegistriesResponse < Struct.new( + :registries, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListSchemaVersionsInput + # data as a hash: # + # { + # schema_id: { # required + # schema_arn: "GlueResourceArn", + # schema_name: "SchemaRegistryNameString", + # registry_name: "SchemaRegistryNameString", + # }, + # max_results: 1, + # next_token: "SchemaRegistryTokenString", + # } # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html - # [2]: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html - # @return [Hash] + # @!attribute [rw] schema_id + # This is a wrapper structure to contain schema identity fields. The + # structure contains: # - # @!attribute [rw] non_overridable_arguments - # Non-overridable arguments for this job, specified as name-value - # pairs. - # @return [Hash] + # * SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. + # Either `SchemaArn` or `SchemaName` and `RegistryName` has to be + # provided. # - # @!attribute [rw] connections - # The connections used for this job. - # @return [Types::ConnectionsList] + # * SchemaId$SchemaName: The name of the schema. Either `SchemaArn` or + # `SchemaName` and `RegistryName` has to be provided. + # @return [Types::SchemaId] # - # @!attribute [rw] max_retries - # The maximum number of times to retry this job if it fails. + # @!attribute [rw] max_results + # Maximum number of results required per page. If the value is not + # supplied, this will be defaulted to 25 per page. # @return [Integer] # - # @!attribute [rw] allocated_capacity - # This field is deprecated. Use `MaxCapacity` instead. + # @!attribute [rw] next_token + # A continuation token, if this is a continuation call. + # @return [String] # - # The number of Glue data processing units (DPUs) to allocate to this - # job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU - # is a relative measure of processing power that consists of 4 vCPUs - # of compute capacity and 16 GB of memory. For more information, see - # the [Glue pricing page][1]. 
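# Illustrative sketch only (not part of the generated SDK code): listing the
# schema registries in the account with the ListRegistries fields documented
# above.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
resp = glue.list_registries(max_results: 25)
resp.registries.each { |r| puts r.registry_name }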
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemaVersionsInput AWS API Documentation # + class ListSchemaVersionsInput < Struct.new( + :schema_id, + :max_results, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] schemas + # An array of `SchemaVersionList` objects containing details of each + # schema version. + # @return [Array] # + # @!attribute [rw] next_token + # A continuation token for paginating the returned list of tokens, + # returned if the current segment of the list is not the last. + # @return [String] # - # [1]: https://aws.amazon.com/glue/pricing/ - # @return [Integer] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemaVersionsResponse AWS API Documentation # - # @!attribute [rw] timeout - # The job timeout in minutes. This is the maximum time that a job run - # can consume resources before it is terminated and enters `TIMEOUT` - # status. The default is 2,880 minutes (48 hours). - # @return [Integer] + class ListSchemaVersionsResponse < Struct.new( + :schemas, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListSchemasInput + # data as a hash: # - # @!attribute [rw] max_capacity - # For Glue version 1.0 or earlier jobs, using the standard worker - # type, the number of Glue data processing units (DPUs) that can be - # allocated when this job runs. A DPU is a relative measure of - # processing power that consists of 4 vCPUs of compute capacity and 16 - # GB of memory. For more information, see the [Glue pricing page][1]. + # { + # registry_id: { + # registry_name: "SchemaRegistryNameString", + # registry_arn: "GlueResourceArn", + # }, + # max_results: 1, + # next_token: "SchemaRegistryTokenString", + # } + # + # @!attribute [rw] registry_id + # A wrapper structure that may contain the registry name and Amazon + # Resource Name (ARN). + # @return [Types::RegistryId] # - # Do not set `Max Capacity` if using `WorkerType` and - # `NumberOfWorkers`. + # @!attribute [rw] max_results + # Maximum number of results required per page. If the value is not + # supplied, this will be defaulted to 25 per page. + # @return [Integer] # - # The value that can be allocated for `MaxCapacity` depends on whether - # you are running a Python shell job or an Apache Spark ETL job: + # @!attribute [rw] next_token + # A continuation token, if this is a continuation call. + # @return [String] # - # * When you specify a Python shell job - # (`JobCommand.Name`="pythonshell"), you can allocate either - # 0.0625 or 1 DPU. The default is 0.0625 DPU. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemasInput AWS API Documentation # - # * When you specify an Apache Spark ETL job - # (`JobCommand.Name`="glueetl") or Apache Spark streaming ETL job - # (`JobCommand.Name`="gluestreaming"), you can allocate from 2 to - # 100 DPUs. The default is 10 DPUs. This job type cannot have a - # fractional DPU allocation. + class ListSchemasInput < Struct.new( + :registry_id, + :max_results, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] schemas + # An array of `SchemaListItem` objects containing details of each + # schema. + # @return [Array] # - # For Glue version 2.0 jobs, you cannot instead specify a `Maximum - # capacity`. Instead, you should specify a `Worker type` and the - # `Number of workers`. 
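# Illustrative sketch only (not part of the generated SDK code): listing the
# schemas in one registry via the RegistryId wrapper, then the versions of one
# schema via the SchemaId wrapper, both documented above. Registry and schema
# names are placeholders.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
schemas = glue.list_schemas(
  registry_id: { registry_name: "example-registry" },
  max_results: 25
)
schemas.schemas.each { |s| puts s.schema_name }

versions = glue.list_schema_versions(
  schema_id: { schema_name: "orders", registry_name: "example-registry" },
  max_results: 25
)
versions.schemas.each { |v| puts v.version_number }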
+ # @!attribute [rw] next_token + # A continuation token for paginating the returned list of tokens, + # returned if the current segment of the list is not the last. + # @return [String] # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemasResponse AWS API Documentation # + class ListSchemasResponse < Struct.new( + :schemas, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListSessionsRequest + # data as a hash: # - # [1]: https://aws.amazon.com/glue/pricing/ - # @return [Float] + # { + # next_token: "OrchestrationToken", + # max_results: 1, + # tags: { + # "TagKey" => "TagValue", + # }, + # request_origin: "OrchestrationNameString", + # } # - # @!attribute [rw] worker_type - # The type of predefined worker that is allocated when a job runs. - # Accepts a value of Standard, G.1X, or G.2X. + # @!attribute [rw] next_token + # The token for the next set of results, or null if there are no more + # result. + # @return [String] # - # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB - # of memory and a 50GB disk, and 2 executors per worker. + # @!attribute [rw] max_results + # The maximum number of results. + # @return [Integer] # - # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPU, 16 - # GB of memory, 64 GB disk), and provides 1 executor per worker. We - # recommend this worker type for memory-intensive jobs. + # @!attribute [rw] tags + # Tags belonging to the session. + # @return [Hash] # - # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPU, 32 - # GB of memory, 128 GB disk), and provides 1 executor per worker. We - # recommend this worker type for memory-intensive jobs. + # @!attribute [rw] request_origin + # The origin of the request. # @return [String] # - # @!attribute [rw] number_of_workers - # The number of workers of a defined `workerType` that are allocated - # when a job runs. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSessionsRequest AWS API Documentation # - # The maximum number of workers you can define are 299 for `G.1X`, and - # 149 for `G.2X`. - # @return [Integer] + class ListSessionsRequest < Struct.new( + :next_token, + :max_results, + :tags, + :request_origin) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] ids + # Returns the Id of the session. + # @return [Array] # - # @!attribute [rw] security_configuration - # The name of the `SecurityConfiguration` structure to be used with - # this job. + # @!attribute [rw] sessions + # Returns the session object. + # @return [Array] + # + # @!attribute [rw] next_token + # The token for the next set of results, or null if there are no more + # result. # @return [String] # - # @!attribute [rw] notification_property - # Specifies the configuration properties of a job notification. - # @return [Types::NotificationProperty] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSessionsResponse AWS API Documentation # - # @!attribute [rw] glue_version - # Glue version determines the versions of Apache Spark and Python that - # Glue supports. The Python version indicates the version supported - # for jobs of type Spark. 
+ class ListSessionsResponse < Struct.new( + :ids, + :sessions, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListStatementsRequest + # data as a hash: # - # For more information about the available Glue versions and - # corresponding Spark and Python versions, see [Glue version][1] in - # the developer guide. + # { + # session_id: "NameString", # required + # request_origin: "OrchestrationNameString", + # next_token: "OrchestrationToken", + # } # + # @!attribute [rw] session_id + # The Session ID of the statements. + # @return [String] # + # @!attribute [rw] request_origin + # The origin of the request to list statements. + # @return [String] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/add-job.html + # @!attribute [rw] next_token # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JobUpdate AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListStatementsRequest AWS API Documentation # - class JobUpdate < Struct.new( - :description, - :log_uri, - :role, - :execution_property, - :command, - :default_arguments, - :non_overridable_arguments, - :connections, - :max_retries, - :allocated_capacity, - :timeout, - :max_capacity, - :worker_type, - :number_of_workers, - :security_configuration, - :notification_property, - :glue_version) + class ListStatementsRequest < Struct.new( + :session_id, + :request_origin, + :next_token) SENSITIVE = [] include Aws::Structure end - # A classifier for `JSON` content. + # @!attribute [rw] statements + # Returns the list of statements. + # @return [Array] # - # @!attribute [rw] name - # The name of the classifier. + # @!attribute [rw] next_token # @return [String] # - # @!attribute [rw] creation_time - # The time that this classifier was registered. - # @return [Time] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListStatementsResponse AWS API Documentation # - # @!attribute [rw] last_updated - # The time that this classifier was last updated. - # @return [Time] + class ListStatementsResponse < Struct.new( + :statements, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass ListTriggersRequest + # data as a hash: # - # @!attribute [rw] version - # The version of this classifier. + # { + # next_token: "GenericString", + # dependent_job_name: "NameString", + # max_results: 1, + # tags: { + # "TagKey" => "TagValue", + # }, + # } + # + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. + # @return [String] + # + # @!attribute [rw] dependent_job_name + # The name of the job for which to retrieve triggers. The trigger that + # can start this job is returned. If there is no such trigger, all + # triggers are returned. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum size of a list to return. # @return [Integer] # - # @!attribute [rw] json_path - # A `JsonPath` string defining the JSON data for the classifier to - # classify. Glue supports a subset of JsonPath, as described in - # [Writing JsonPath Custom Classifiers][1]. + # @!attribute [rw] tags + # Specifies to return only these tagged resources. 
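# Illustrative sketch only (not part of the generated SDK code): listing
# interactive sessions and then the statements run in one of them, using the
# ListSessions and ListStatements fields documented above.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
sessions = glue.list_sessions(max_results: 10)
sessions.ids.each { |id| puts id }

unless sessions.ids.empty?
  statements = glue.list_statements(session_id: sessions.ids.first)
  statements.statements.each { |s| puts s.code }
end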
+ # @return [Hash] # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggersRequest AWS API Documentation # + class ListTriggersRequest < Struct.new( + :next_token, + :dependent_job_name, + :max_results, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] trigger_names + # The names of all triggers in the account, or the triggers with the + # specified tags. + # @return [Array] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json + # @!attribute [rw] next_token + # A continuation token, if the returned list does not contain the last + # metric available. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/JsonClassifier AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggersResponse AWS API Documentation # - class JsonClassifier < Struct.new( - :name, - :creation_time, - :last_updated, - :version, - :json_path) + class ListTriggersResponse < Struct.new( + :trigger_names, + :next_token) SENSITIVE = [] include Aws::Structure end - # A partition key pair consisting of a name and a type. + # @note When making an API call, you may pass ListWorkflowsRequest + # data as a hash: + # + # { + # next_token: "GenericString", + # max_results: 1, + # } # - # @!attribute [rw] name - # The name of a partition key. + # @!attribute [rw] next_token + # A continuation token, if this is a continuation request. # @return [String] # - # @!attribute [rw] type - # The type of a partition key. - # @return [String] + # @!attribute [rw] max_results + # The maximum size of a list to return. + # @return [Integer] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/KeySchemaElement AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListWorkflowsRequest AWS API Documentation # - class KeySchemaElement < Struct.new( - :name, - :type) + class ListWorkflowsRequest < Struct.new( + :next_token, + :max_results) SENSITIVE = [] include Aws::Structure end - # Specifies configuration properties for a labeling set generation task - # run. + # @!attribute [rw] workflows + # List of names of workflows in the account. + # @return [Array] # - # @!attribute [rw] output_s3_path - # The Amazon Simple Storage Service (Amazon S3) path where you will - # generate the labeling set. + # @!attribute [rw] next_token + # A continuation token, if not all workflow names have been returned. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LabelingSetGenerationTaskRunProperties AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListWorkflowsResponse AWS API Documentation # - class LabelingSetGenerationTaskRunProperties < Struct.new( - :output_s3_path) + class ListWorkflowsResponse < Struct.new( + :workflows, + :next_token) SENSITIVE = [] include Aws::Structure end - # Specifies AWS Lake Formation configuration settings for the crawler. + # The location of resources. 
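# Illustrative sketch only (not part of the generated SDK code): retrieving
# only the trigger that can start a given job, using the dependent_job_name
# field described above. The job name is a placeholder.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
resp = glue.list_triggers(dependent_job_name: "nightly-etl", max_results: 10)
resp.trigger_names.each { |name| puts name }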
# - # @note When making an API call, you may pass LakeFormationConfiguration + # @note When making an API call, you may pass Location # data as a hash: # # { - # use_lake_formation_credentials: false, - # account_id: "AccountId", + # jdbc: [ + # { + # name: "CodeGenArgName", # required + # value: "CodeGenArgValue", # required + # param: false, + # }, + # ], + # s3: [ + # { + # name: "CodeGenArgName", # required + # value: "CodeGenArgValue", # required + # param: false, + # }, + # ], + # dynamo_db: [ + # { + # name: "CodeGenArgName", # required + # value: "CodeGenArgValue", # required + # param: false, + # }, + # ], # } # - # @!attribute [rw] use_lake_formation_credentials - # Specifies whether to use AWS Lake Formation credentials for the - # crawler instead of the IAM role credentials. - # @return [Boolean] + # @!attribute [rw] jdbc + # A JDBC location. + # @return [Array] # - # @!attribute [rw] account_id - # Required for cross account crawls. For same account crawls as the - # target data, this can be left as null. - # @return [String] + # @!attribute [rw] s3 + # An Amazon Simple Storage Service (Amazon S3) location. + # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LakeFormationConfiguration AWS API Documentation + # @!attribute [rw] dynamo_db + # An Amazon DynamoDB table location. + # @return [Array] # - class LakeFormationConfiguration < Struct.new( - :use_lake_formation_credentials, - :account_id) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Location AWS API Documentation + # + class Location < Struct.new( + :jdbc, + :s3, + :dynamo_db) SENSITIVE = [] include Aws::Structure end - # When there are multiple versions of a blueprint and the latest version - # has some errors, this attribute indicates the last successful - # blueprint definition that is available with the service. + # Defines column statistics supported for integer data columns. # - # @!attribute [rw] description - # The description of the blueprint. - # @return [String] + # @note When making an API call, you may pass LongColumnStatisticsData + # data as a hash: # - # @!attribute [rw] last_modified_on - # The date and time the blueprint was last modified. - # @return [Time] + # { + # minimum_value: 1, + # maximum_value: 1, + # number_of_nulls: 1, # required + # number_of_distinct_values: 1, # required + # } # - # @!attribute [rw] parameter_spec - # A JSON string specifying the parameters for the blueprint. - # @return [String] + # @!attribute [rw] minimum_value + # The lowest value in the column. + # @return [Integer] # - # @!attribute [rw] blueprint_location - # Specifies a path in Amazon S3 where the blueprint is published by - # the Glue developer. - # @return [String] + # @!attribute [rw] maximum_value + # The highest value in the column. + # @return [Integer] # - # @!attribute [rw] blueprint_service_location - # Specifies a path in Amazon S3 where the blueprint is copied when you - # create or update the blueprint. - # @return [String] + # @!attribute [rw] number_of_nulls + # The number of null values in the column. + # @return [Integer] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LastActiveDefinition AWS API Documentation + # @!attribute [rw] number_of_distinct_values + # The number of distinct values in a column. 
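# Illustrative sketch only (not part of the generated SDK code): writing long
# (integer) column statistics with the LongColumnStatisticsData fields
# documented above, assuming the UpdateColumnStatisticsForTable operation's
# ColumnStatistics wrapper. Database, table, and column names, and the
# statistic values, are placeholders.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")
glue.update_column_statistics_for_table(
  database_name: "sales_db",
  table_name: "orders",
  column_statistics_list: [
    {
      column_name: "order_count",
      column_type: "bigint",
      analyzed_time: Time.now,
      statistics_data: {
        type: "LONG",
        long_column_statistics_data: {
          minimum_value: 0,
          maximum_value: 10_000,
          number_of_nulls: 0,
          number_of_distinct_values: 873
        }
      }
    }
  ]
)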
+ # @return [Integer] # - class LastActiveDefinition < Struct.new( - :description, - :last_modified_on, - :parameter_spec, - :blueprint_location, - :blueprint_service_location) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LongColumnStatisticsData AWS API Documentation + # + class LongColumnStatisticsData < Struct.new( + :minimum_value, + :maximum_value, + :number_of_nulls, + :number_of_distinct_values) SENSITIVE = [] include Aws::Structure end - # Status and error information about the most recent crawl. - # - # @!attribute [rw] status - # Status of the last crawl. - # @return [String] + # A structure for a machine learning transform. # - # @!attribute [rw] error_message - # If an error occurred, the error information about the last crawl. + # @!attribute [rw] transform_id + # The unique transform ID that is generated for the machine learning + # transform. The ID is guaranteed to be unique and does not change. # @return [String] # - # @!attribute [rw] log_group - # The log group for the last crawl. + # @!attribute [rw] name + # A user-defined name for the machine learning transform. Names are + # not guaranteed unique and can be changed at any time. # @return [String] # - # @!attribute [rw] log_stream - # The log stream for the last crawl. + # @!attribute [rw] description + # A user-defined, long-form description text for the machine learning + # transform. Descriptions are not guaranteed to be unique and can be + # changed at any time. # @return [String] # - # @!attribute [rw] message_prefix - # The prefix for a message about this crawl. + # @!attribute [rw] status + # The current status of the machine learning transform. # @return [String] # - # @!attribute [rw] start_time - # The time at which the crawl started. + # @!attribute [rw] created_on + # A timestamp. The time and date that this machine learning transform + # was created. # @return [Time] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LastCrawlInfo AWS API Documentation + # @!attribute [rw] last_modified_on + # A timestamp. The last point in time when this machine learning + # transform was modified. + # @return [Time] # - class LastCrawlInfo < Struct.new( - :status, - :error_message, - :log_group, - :log_stream, - :message_prefix, - :start_time) - SENSITIVE = [] - include Aws::Structure - end - - # Specifies data lineage configuration settings for the crawler. + # @!attribute [rw] input_record_tables + # A list of Glue table definitions used by the transform. + # @return [Array] # - # @note When making an API call, you may pass LineageConfiguration - # data as a hash: + # @!attribute [rw] parameters + # A `TransformParameters` object. You can use parameters to tune + # (customize) the behavior of the machine learning transform by + # specifying what data it learns from and your preference on various + # tradeoffs (such as precious vs. recall, or accuracy vs. cost). + # @return [Types::TransformParameters] # - # { - # crawler_lineage_settings: "ENABLE", # accepts ENABLE, DISABLE - # } + # @!attribute [rw] evaluation_metrics + # An `EvaluationMetrics` object. Evaluation metrics provide an + # estimate of the quality of your machine learning transform. + # @return [Types::EvaluationMetrics] # - # @!attribute [rw] crawler_lineage_settings - # Specifies whether data lineage is enabled for the crawler. Valid - # values are: + # @!attribute [rw] label_count + # A count identifier for the labeling files generated by Glue for this + # transform. 
As you create a better transform, you can iteratively + # download, label, and upload the labeling file. + # @return [Integer] # - # * ENABLE: enables data lineage for the crawler + # @!attribute [rw] schema + # A map of key-value pairs representing the columns and data types + # that this transform can run against. Has an upper bound of 100 + # columns. + # @return [Array] # - # * DISABLE: disables data lineage for the crawler - # @return [String] + # @!attribute [rw] role + # The name or Amazon Resource Name (ARN) of the IAM role with the + # required permissions. The required permissions include both Glue + # service role permissions to Glue resources, and Amazon S3 + # permissions required by the transform. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LineageConfiguration AWS API Documentation + # * This role needs Glue service role permissions to allow access to + # resources in Glue. See [Attach a Policy to IAM Users That Access + # Glue][1]. # - class LineageConfiguration < Struct.new( - :crawler_lineage_settings) - SENSITIVE = [] - include Aws::Structure - end - - # @note When making an API call, you may pass ListBlueprintsRequest - # data as a hash: + # * This role needs permission to your Amazon Simple Storage Service + # (Amazon S3) sources, targets, temporary directory, scripts, and + # any libraries used by the task run for this transform. # - # { - # next_token: "GenericString", - # max_results: 1, - # tags: { - # "TagKey" => "TagValue", - # }, - # } # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. - # @return [String] # - # @!attribute [rw] max_results - # The maximum size of a list to return. - # @return [Integer] + # [1]: https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html + # @return [String] # - # @!attribute [rw] tags - # Filters the list by an Amazon Web Services resource tag. - # @return [Hash] + # @!attribute [rw] glue_version + # This value determines which version of Glue this machine learning + # transform is compatible with. Glue 1.0 is recommended for most + # customers. If the value is not set, the Glue compatibility defaults + # to Glue 0.9. For more information, see [Glue Versions][1] in the + # developer guide. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListBlueprintsRequest AWS API Documentation # - class ListBlueprintsRequest < Struct.new( - :next_token, - :max_results, - :tags) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] blueprints - # List of names of blueprints in the account. - # @return [Array] # - # @!attribute [rw] next_token - # A continuation token, if not all blueprint names have been returned. + # [1]: https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListBlueprintsResponse AWS API Documentation + # @!attribute [rw] max_capacity + # The number of Glue data processing units (DPUs) that are allocated + # to task runs for this transform. You can allocate from 2 to 100 + # DPUs; the default is 10. A DPU is a relative measure of processing + # power that consists of 4 vCPUs of compute capacity and 16 GB of + # memory. For more information, see the [Glue pricing page][1]. 
# - class ListBlueprintsResponse < Struct.new( - :blueprints, - :next_token) - SENSITIVE = [] - include Aws::Structure - end - - # @note When making an API call, you may pass ListCrawlersRequest - # data as a hash: + # `MaxCapacity` is a mutually exclusive option with `NumberOfWorkers` + # and `WorkerType`. # - # { - # max_results: 1, - # next_token: "Token", - # tags: { - # "TagKey" => "TagValue", - # }, - # } + # * If either `NumberOfWorkers` or `WorkerType` is set, then + # `MaxCapacity` cannot be set. # - # @!attribute [rw] max_results - # The maximum size of a list to return. - # @return [Integer] + # * If `MaxCapacity` is set then neither `NumberOfWorkers` or + # `WorkerType` can be set. # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. - # @return [String] + # * If `WorkerType` is set, then `NumberOfWorkers` is required (and + # vice versa). # - # @!attribute [rw] tags - # Specifies to return only these tagged resources. - # @return [Hash] + # * `MaxCapacity` and `NumberOfWorkers` must both be at least 1. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlersRequest AWS API Documentation + # When the `WorkerType` field is set to a value other than `Standard`, + # the `MaxCapacity` field is set automatically and becomes read-only. # - class ListCrawlersRequest < Struct.new( - :max_results, - :next_token, - :tags) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] crawler_names - # The names of all crawlers in the account, or the crawlers with the - # specified tags. - # @return [Array] # - # @!attribute [rw] next_token - # A continuation token, if the returned list does not contain the last - # metric available. - # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawlersResponse AWS API Documentation + # [1]: http://aws.amazon.com/glue/pricing/ + # @return [Float] # - class ListCrawlersResponse < Struct.new( - :crawler_names, - :next_token) - SENSITIVE = [] - include Aws::Structure - end - - # @note When making an API call, you may pass ListCustomEntityTypesRequest - # data as a hash: + # @!attribute [rw] worker_type + # The type of predefined worker that is allocated when a task of this + # transform runs. Accepts a value of Standard, G.1X, or G.2X. # - # { - # next_token: "PaginationToken", - # max_results: 1, - # } + # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB + # of memory and a 50GB disk, and 2 executors per worker. # - # @!attribute [rw] next_token - # A paginated token to offset the results. - # @return [String] + # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of + # memory and a 64GB disk, and 1 executor per worker. # - # @!attribute [rw] max_results - # The maximum number of results to return. - # @return [Integer] + # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of + # memory and a 128GB disk, and 1 executor per worker. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCustomEntityTypesRequest AWS API Documentation + # `MaxCapacity` is a mutually exclusive option with `NumberOfWorkers` + # and `WorkerType`. # - class ListCustomEntityTypesRequest < Struct.new( - :next_token, - :max_results) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] custom_entity_types - # A list of `CustomEntityType` objects representing custom patterns. - # @return [Array] + # * If either `NumberOfWorkers` or `WorkerType` is set, then + # `MaxCapacity` cannot be set. 
# - # @!attribute [rw] next_token - # A pagination token, if more results are available. - # @return [String] + # * If `MaxCapacity` is set then neither `NumberOfWorkers` or + # `WorkerType` can be set. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCustomEntityTypesResponse AWS API Documentation + # * If `WorkerType` is set, then `NumberOfWorkers` is required (and + # vice versa). # - class ListCustomEntityTypesResponse < Struct.new( - :custom_entity_types, - :next_token) - SENSITIVE = [] - include Aws::Structure - end - - # @note When making an API call, you may pass ListDevEndpointsRequest - # data as a hash: + # * `MaxCapacity` and `NumberOfWorkers` must both be at least 1. + # @return [String] # - # { - # next_token: "GenericString", - # max_results: 1, - # tags: { - # "TagKey" => "TagValue", - # }, - # } + # @!attribute [rw] number_of_workers + # The number of workers of a defined `workerType` that are allocated + # when a task of the transform runs. # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. - # @return [String] + # If `WorkerType` is set, then `NumberOfWorkers` is required (and vice + # versa). + # @return [Integer] # - # @!attribute [rw] max_results - # The maximum size of a list to return. + # @!attribute [rw] timeout + # The timeout in minutes of the machine learning transform. # @return [Integer] # - # @!attribute [rw] tags - # Specifies to return only these tagged resources. - # @return [Hash] + # @!attribute [rw] max_retries + # The maximum number of times to retry after an `MLTaskRun` of the + # machine learning transform fails. + # @return [Integer] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpointsRequest AWS API Documentation + # @!attribute [rw] transform_encryption + # The encryption-at-rest settings of the transform that apply to + # accessing user data. Machine learning transforms can access user + # data encrypted in Amazon S3 using KMS. + # @return [Types::TransformEncryption] # - class ListDevEndpointsRequest < Struct.new( - :next_token, - :max_results, - :tags) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MLTransform AWS API Documentation + # + class MLTransform < Struct.new( + :transform_id, + :name, + :description, + :status, + :created_on, + :last_modified_on, + :input_record_tables, + :parameters, + :evaluation_metrics, + :label_count, + :schema, + :role, + :glue_version, + :max_capacity, + :worker_type, + :number_of_workers, + :timeout, + :max_retries, + :transform_encryption) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] dev_endpoint_names - # The names of all the `DevEndpoint`s in the account, or the - # `DevEndpoint`s with the specified tags. - # @return [Array] + # The machine learning transform is not ready to run. # - # @!attribute [rw] next_token - # A continuation token, if the returned list does not contain the last - # metric available. + # @!attribute [rw] message + # A message describing the problem. 
# @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListDevEndpointsResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MLTransformNotReadyException AWS API Documentation # - class ListDevEndpointsResponse < Struct.new( - :dev_endpoint_names, - :next_token) + class MLTransformNotReadyException < Struct.new( + :message) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListJobsRequest + # The encryption-at-rest settings of the transform that apply to + # accessing user data. + # + # @note When making an API call, you may pass MLUserDataEncryption # data as a hash: # # { - # next_token: "GenericString", - # max_results: 1, - # tags: { - # "TagKey" => "TagValue", - # }, + # ml_user_data_encryption_mode: "DISABLED", # required, accepts DISABLED, SSE-KMS + # kms_key_id: "NameString", # } # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. - # @return [String] - # - # @!attribute [rw] max_results - # The maximum size of a list to return. - # @return [Integer] - # - # @!attribute [rw] tags - # Specifies to return only these tagged resources. - # @return [Hash] + # @!attribute [rw] ml_user_data_encryption_mode + # The encryption mode applied to user data. Valid values are: # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobsRequest AWS API Documentation + # * DISABLED: encryption is disabled # - class ListJobsRequest < Struct.new( - :next_token, - :max_results, - :tags) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] job_names - # The names of all jobs in the account, or the jobs with the specified - # tags. - # @return [Array] + # * SSEKMS: use of server-side encryption with Key Management Service + # (SSE-KMS) for user data stored in Amazon S3. + # @return [String] # - # @!attribute [rw] next_token - # A continuation token, if the returned list does not contain the last - # metric available. + # @!attribute [rw] kms_key_id + # The ID for the customer-provided KMS key. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListJobsResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MLUserDataEncryption AWS API Documentation # - class ListJobsResponse < Struct.new( - :job_names, - :next_token) + class MLUserDataEncryption < Struct.new( + :ml_user_data_encryption_mode, + :kms_key_id) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListMLTransformsRequest + # Specifies the mapping of data property keys. 
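# Illustrative sketch only (not part of the generated SDK code): an
# MLUserDataEncryption hash as documented above, wrapped in the
# TransformEncryption value that CreateMLTransform accepts as
# transform_encryption (the other required CreateMLTransform arguments are
# omitted here). The KMS key alias is a placeholder.
transform_encryption = {
  ml_user_data_encryption: {
    ml_user_data_encryption_mode: "SSE-KMS",
    kms_key_id: "alias/glue-ml-labels"
  }
}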
+ # + # @note When making an API call, you may pass Mapping # data as a hash: # # { - # next_token: "PaginationToken", - # max_results: 1, - # filter: { - # name: "NameString", - # transform_type: "FIND_MATCHES", # accepts FIND_MATCHES - # status: "NOT_READY", # accepts NOT_READY, READY, DELETING - # glue_version: "GlueVersionString", - # created_before: Time.now, - # created_after: Time.now, - # last_modified_before: Time.now, - # last_modified_after: Time.now, - # schema: [ - # { - # name: "ColumnNameString", - # data_type: "ColumnTypeString", + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: [ + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings # }, - # ], - # }, - # sort: { - # column: "NAME", # required, accepts NAME, TRANSFORM_TYPE, STATUS, CREATED, LAST_MODIFIED - # sort_direction: "DESCENDING", # required, accepts DESCENDING, ASCENDING - # }, - # tags: { - # "TagKey" => "TagValue", - # }, + # }, + # ], # } # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. + # @!attribute [rw] to_key + # After the apply mapping, what the name of the column should be. Can + # be the same as `FromPath`. # @return [String] # - # @!attribute [rw] max_results - # The maximum size of a list to return. - # @return [Integer] + # @!attribute [rw] from_path + # The table or column to be modified. + # @return [Array] # - # @!attribute [rw] filter - # A `TransformFilterCriteria` used to filter the machine learning - # transforms. - # @return [Types::TransformFilterCriteria] + # @!attribute [rw] from_type + # The type of the data to be modified. + # @return [String] # - # @!attribute [rw] sort - # A `TransformSortCriteria` used to sort the machine learning - # transforms. - # @return [Types::TransformSortCriteria] + # @!attribute [rw] to_type + # The data type that the data is to be modified to. + # @return [String] # - # @!attribute [rw] tags - # Specifies to return only these tagged resources. - # @return [Hash] + # @!attribute [rw] dropped + # If true, then the column is removed. + # @return [Boolean] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListMLTransformsRequest AWS API Documentation + # @!attribute [rw] children + # Only applicable to nested data structures. If you want to change the + # parent structure, but also one of its children, you can fill out + # this data strucutre. It is also `Mapping`, but its `FromPath` will + # be the parent's `FromPath` plus the `FromPath` from this structure. # - class ListMLTransformsRequest < Struct.new( - :next_token, - :max_results, - :filter, - :sort, - :tags) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] transform_ids - # The identifiers of all the machine learning transforms in the - # account, or the machine learning transforms with the specified tags. - # @return [Array] + # For the children part, suppose you have the structure: # - # @!attribute [rw] next_token - # A continuation token, if the returned list does not contain the last - # metric available. 
- # @return [String]
+ # `\{ "FromPath": "OuterStructure", "ToKey": "OuterStructure",
+ # "ToType": "Struct", "Dropped": false, "Children": [\{ "FromPath":
+ # "inner", "ToKey": "inner", "ToType": "Double", "Dropped": false, \}]
+ # \}`
#
- # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListMLTransformsResponse AWS API Documentation
+ # You can specify a `Mapping` that looks like:
#
- class ListMLTransformsResponse < Struct.new(
- :transform_ids,
- :next_token)
+ # `\{ "FromPath": "OuterStructure", "ToKey": "OuterStructure",
+ # "ToType": "Struct", "Dropped": false, "Children": [\{ "FromPath":
+ # "inner", "ToKey": "inner", "ToType": "Double", "Dropped": false, \}]
+ # \}`
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Mapping AWS API Documentation
+ #
+ class Mapping < Struct.new(
+ :to_key,
+ :from_path,
+ :from_type,
+ :to_type,
+ :dropped,
+ :children)
SENSITIVE = []
include Aws::Structure
end
- # @note When making an API call, you may pass ListRegistriesInput
+ # Defines a mapping.
+ #
+ # @note When making an API call, you may pass MappingEntry
# data as a hash:
#
# {
- # max_results: 1,
- # next_token: "SchemaRegistryTokenString",
+ # source_table: "TableName",
+ # source_path: "SchemaPathString",
+ # source_type: "FieldType",
+ # target_table: "TableName",
+ # target_path: "SchemaPathString",
+ # target_type: "FieldType",
# }
#
- # @!attribute [rw] max_results
- # Maximum number of results required per page. If the value is not
- # supplied, this will be defaulted to 25 per page.
- # @return [Integer]
+ # @!attribute [rw] source_table
+ # The name of the source table.
+ # @return [String]
#
- # @!attribute [rw] next_token
- # A continuation token, if this is a continuation call.
+ # @!attribute [rw] source_path
+ # The source path.
# @return [String]
#
- # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListRegistriesInput AWS API Documentation
+ # @!attribute [rw] source_type
+ # The source type.
+ # @return [String]
#
- class ListRegistriesInput < Struct.new(
- :max_results,
- :next_token)
- SENSITIVE = []
- include Aws::Structure
- end
-
- # @!attribute [rw] registries
- # An array of `RegistryDetailedListItem` objects containing minimal
- # details of each registry.
- # @return [Array]
+ # @!attribute [rw] target_table
+ # The target table.
+ # @return [String]
#
- # @!attribute [rw] next_token
- # A continuation token for paginating the returned list of tokens,
- # returned if the current segment of the list is not the last.
+ # @!attribute [rw] target_path
+ # The target path.
# @return [String]
#
- # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListRegistriesResponse AWS API Documentation
+ # @!attribute [rw] target_type
+ # The target type.
+ # @return [String]
#
- class ListRegistriesResponse < Struct.new(
- :registries,
- :next_token)
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MappingEntry AWS API Documentation
+ #
+ class MappingEntry < Struct.new(
+ :source_table,
+ :source_path,
+ :source_type,
+ :target_table,
+ :target_path,
+ :target_type)
SENSITIVE = []
include Aws::Structure
end
- # @note When making an API call, you may pass ListSchemaVersionsInput
+ # Specifies a transform that merges a `DynamicFrame` with a staging
+ # `DynamicFrame` based on the specified primary keys to identify
+ # records. Duplicate records (records with the same primary keys) are
+ # not de-duplicated.
+ # + # @note When making an API call, you may pass Merge # data as a hash: # # { - # schema_id: { # required - # schema_arn: "GlueResourceArn", - # schema_name: "SchemaRegistryNameString", - # registry_name: "SchemaRegistryNameString", - # }, - # max_results: 1, - # next_token: "SchemaRegistryTokenString", + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], # } # - # @!attribute [rw] schema_id - # This is a wrapper structure to contain schema identity fields. The - # structure contains: + # @!attribute [rw] name + # The name of the transform node. + # @return [String] # - # * SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. - # Either `SchemaArn` or `SchemaName` and `RegistryName` has to be - # provided. + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] # - # * SchemaId$SchemaName: The name of the schema. Either `SchemaArn` or - # `SchemaName` and `RegistryName` has to be provided. - # @return [Types::SchemaId] + # @!attribute [rw] source + # The source `DynamicFrame` that will be merged with a staging + # `DynamicFrame`. + # @return [String] # - # @!attribute [rw] max_results - # Maximum number of results required per page. If the value is not - # supplied, this will be defaulted to 25 per page. - # @return [Integer] + # @!attribute [rw] primary_keys + # The list of primary key fields to match records from the source and + # staging dynamic frames. + # @return [Array>] # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation call. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Merge AWS API Documentation + # + class Merge < Struct.new( + :name, + :inputs, + :source, + :primary_keys) + SENSITIVE = [] + include Aws::Structure + end + + # A structure containing metadata information for a schema version. + # + # @!attribute [rw] metadata_value + # The metadata key’s corresponding value. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemaVersionsInput AWS API Documentation + # @!attribute [rw] created_time + # The time at which the entry was created. + # @return [String] # - class ListSchemaVersionsInput < Struct.new( - :schema_id, - :max_results, - :next_token) + # @!attribute [rw] other_metadata_value_list + # Other metadata belonging to the same metadata key. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MetadataInfo AWS API Documentation + # + class MetadataInfo < Struct.new( + :metadata_value, + :created_time, + :other_metadata_value_list) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] schemas - # An array of `SchemaVersionList` objects containing details of each - # schema version. - # @return [Array] + # A structure containing a key value pair for metadata. # - # @!attribute [rw] next_token - # A continuation token for paginating the returned list of tokens, - # returned if the current segment of the list is not the last. + # @note When making an API call, you may pass MetadataKeyValuePair + # data as a hash: + # + # { + # metadata_key: "MetadataKeyString", + # metadata_value: "MetadataValueString", + # } + # + # @!attribute [rw] metadata_key + # A metadata key. 
# @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemaVersionsResponse AWS API Documentation + # @!attribute [rw] metadata_value + # A metadata key’s corresponding value. + # @return [String] # - class ListSchemaVersionsResponse < Struct.new( - :schemas, - :next_token) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MetadataKeyValuePair AWS API Documentation + # + class MetadataKeyValuePair < Struct.new( + :metadata_key, + :metadata_value) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListSchemasInput + # Specifies a Microsoft SQL server data source in the Glue Data Catalog. + # + # @note When making an API call, you may pass MicrosoftSQLServerCatalogSource # data as a hash: # # { - # registry_id: { - # registry_name: "SchemaRegistryNameString", - # registry_arn: "GlueResourceArn", - # }, - # max_results: 1, - # next_token: "SchemaRegistryTokenString", + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required # } # - # @!attribute [rw] registry_id - # A wrapper structure that may contain the registry name and Amazon - # Resource Name (ARN). - # @return [Types::RegistryId] + # @!attribute [rw] name + # The name of the data source. + # @return [String] # - # @!attribute [rw] max_results - # Maximum number of results required per page. If the value is not - # supplied, this will be defaulted to 25 per page. - # @return [Integer] + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation call. + # @!attribute [rw] table + # The name of the table in the database to read from. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemasInput AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MicrosoftSQLServerCatalogSource AWS API Documentation # - class ListSchemasInput < Struct.new( - :registry_id, - :max_results, - :next_token) + class MicrosoftSQLServerCatalogSource < Struct.new( + :name, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] schemas - # An array of `SchemaListItem` objects containing details of each - # schema. - # @return [Array] + # Specifies a target that uses Microsoft SQL. # - # @!attribute [rw] next_token - # A continuation token for paginating the returned list of tokens, - # returned if the current segment of the list is not the last. + # @note When making an API call, you may pass MicrosoftSQLServerCatalogTarget + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # } + # + # @!attribute [rw] name + # The name of the data target. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSchemasResponse AWS API Documentation + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] # - class ListSchemasResponse < Struct.new( - :schemas, - :next_token) + # @!attribute [rw] database + # The name of the database to write to. + # @return [String] + # + # @!attribute [rw] table + # The name of the table in the database to write to. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MicrosoftSQLServerCatalogTarget AWS API Documentation + # + class MicrosoftSQLServerCatalogTarget < Struct.new( + :name, + :inputs, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListSessionsRequest + # Specifies an Amazon DocumentDB or MongoDB data store to crawl. + # + # @note When making an API call, you may pass MongoDBTarget # data as a hash: # # { - # next_token: "OrchestrationToken", - # max_results: 1, - # tags: { - # "TagKey" => "TagValue", - # }, - # request_origin: "OrchestrationNameString", + # connection_name: "ConnectionName", + # path: "Path", + # scan_all: false, # } # - # @!attribute [rw] next_token - # The token for the next set of results, or null if there are no more - # result. + # @!attribute [rw] connection_name + # The name of the connection to use to connect to the Amazon + # DocumentDB or MongoDB target. # @return [String] # - # @!attribute [rw] max_results - # The maximum number of results. - # @return [Integer] + # @!attribute [rw] path + # The path of the Amazon DocumentDB or MongoDB target + # (database/collection). + # @return [String] # - # @!attribute [rw] tags - # Tags belonging to the session. - # @return [Hash] + # @!attribute [rw] scan_all + # Indicates whether to scan all the records, or to sample rows from + # the table. Scanning all the records can take a long time when the + # table is not a high throughput table. # - # @!attribute [rw] request_origin - # The origin of the request. - # @return [String] + # A value of `true` means to scan all records, while a value of + # `false` means to sample the records. If no value is specified, the + # value defaults to `true`. + # @return [Boolean] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSessionsRequest AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MongoDBTarget AWS API Documentation # - class ListSessionsRequest < Struct.new( - :next_token, - :max_results, - :tags, - :request_origin) + class MongoDBTarget < Struct.new( + :connection_name, + :path, + :scan_all) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] ids - # Returns the Id of the session. - # @return [Array] + # Specifies a MySQL data source in the Glue Data Catalog. # - # @!attribute [rw] sessions - # Returns the session object. - # @return [Array] + # @note When making an API call, you may pass MySQLCatalogSource + # data as a hash: # - # @!attribute [rw] next_token - # The token for the next set of results, or null if there are no more - # result. + # { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # } + # + # @!attribute [rw] name + # The name of the data source. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListSessionsResponse AWS API Documentation + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] # - class ListSessionsResponse < Struct.new( - :ids, - :sessions, - :next_token) + # @!attribute [rw] table + # The name of the table in the database to read from. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MySQLCatalogSource AWS API Documentation + # + class MySQLCatalogSource < Struct.new( + :name, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListStatementsRequest + # Specifies a target that uses MySQL. + # + # @note When making an API call, you may pass MySQLCatalogTarget # data as a hash: # # { - # session_id: "NameString", # required - # request_origin: "OrchestrationNameString", - # next_token: "OrchestrationToken", + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required # } # - # @!attribute [rw] session_id - # The Session ID of the statements. + # @!attribute [rw] name + # The name of the data target. # @return [String] # - # @!attribute [rw] request_origin - # The origin of the request to list statements. + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] + # + # @!attribute [rw] database + # The name of the database to write to. # @return [String] # - # @!attribute [rw] next_token + # @!attribute [rw] table + # The name of the table in the database to write to. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListStatementsRequest AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MySQLCatalogTarget AWS API Documentation # - class ListStatementsRequest < Struct.new( - :session_id, - :request_origin, - :next_token) + class MySQLCatalogTarget < Struct.new( + :name, + :inputs, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] statements - # Returns the list of statements. - # @return [Array] + # There is no applicable schedule. # - # @!attribute [rw] next_token + # @!attribute [rw] message + # A message describing the problem. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListStatementsResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/NoScheduleException AWS API Documentation # - class ListStatementsResponse < Struct.new( - :statements, - :next_token) + class NoScheduleException < Struct.new( + :message) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListTriggersRequest - # data as a hash: + # A node represents an Glue component (trigger, crawler, or job) on a + # workflow graph. # - # { - # next_token: "GenericString", - # dependent_job_name: "NameString", - # max_results: 1, - # tags: { - # "TagKey" => "TagValue", - # }, - # } + # @!attribute [rw] type + # The type of Glue component represented by the node. + # @return [String] # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. + # @!attribute [rw] name + # The name of the Glue component represented by the node. # @return [String] # - # @!attribute [rw] dependent_job_name - # The name of the job for which to retrieve triggers. The trigger that - # can start this job is returned. If there is no such trigger, all - # triggers are returned. + # @!attribute [rw] unique_id + # The unique Id assigned to the node within the workflow. # @return [String] # - # @!attribute [rw] max_results - # The maximum size of a list to return. - # @return [Integer] + # @!attribute [rw] trigger_details + # Details of the Trigger when the node represents a Trigger. 
+ # @return [Types::TriggerNodeDetails] # - # @!attribute [rw] tags - # Specifies to return only these tagged resources. - # @return [Hash] + # @!attribute [rw] job_details + # Details of the Job when the node represents a Job. + # @return [Types::JobNodeDetails] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggersRequest AWS API Documentation + # @!attribute [rw] crawler_details + # Details of the crawler when the node represents a crawler. + # @return [Types::CrawlerNodeDetails] # - class ListTriggersRequest < Struct.new( - :next_token, - :dependent_job_name, - :max_results, - :tags) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Node AWS API Documentation + # + class Node < Struct.new( + :type, + :name, + :unique_id, + :trigger_details, + :job_details, + :crawler_details) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] trigger_names - # The names of all triggers in the account, or the triggers with the - # specified tags. - # @return [Array] + # Specifies configuration properties of a notification. + # + # @note When making an API call, you may pass NotificationProperty + # data as a hash: + # + # { + # notify_delay_after: 1, + # } + # + # @!attribute [rw] notify_delay_after + # After a job run starts, the number of minutes to wait before sending + # a job run delay notification. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/NotificationProperty AWS API Documentation + # + class NotificationProperty < Struct.new( + :notify_delay_after) + SENSITIVE = [] + include Aws::Structure + end + + # Represents whether certain values are recognized as null values for + # removal. + # + # @note When making an API call, you may pass NullCheckBoxList + # data as a hash: + # + # { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # } # - # @!attribute [rw] next_token - # A continuation token, if the returned list does not contain the last - # metric available. - # @return [String] + # @!attribute [rw] is_empty + # Specifies that an empty string is considered as a null value. + # @return [Boolean] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListTriggersResponse AWS API Documentation + # @!attribute [rw] is_null_string + # Specifies that a value spelling out the word 'null' is considered + # as a null value. + # @return [Boolean] # - class ListTriggersResponse < Struct.new( - :trigger_names, - :next_token) + # @!attribute [rw] is_neg_one + # Specifies that an integer value of -1 is considered as a null value. + # @return [Boolean] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/NullCheckBoxList AWS API Documentation + # + class NullCheckBoxList < Struct.new( + :is_empty, + :is_null_string, + :is_neg_one) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ListWorkflowsRequest + # Represents a custom null value such as a zeros or other value being + # used as a null placeholder unique to the dataset. + # + # @note When making an API call, you may pass NullValueField # data as a hash: # # { - # next_token: "GenericString", - # max_results: 1, + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, # } # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation request. + # @!attribute [rw] value + # The value of the null placeholder. 
# @return [String] # - # @!attribute [rw] max_results - # The maximum size of a list to return. - # @return [Integer] + # @!attribute [rw] datatype + # The datatype of the value. + # @return [Types::Datatype] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListWorkflowsRequest AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/NullValueField AWS API Documentation # - class ListWorkflowsRequest < Struct.new( - :next_token, - :max_results) + class NullValueField < Struct.new( + :value, + :datatype) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] workflows - # List of names of workflows in the account. - # @return [Array] + # The operation timed out. # - # @!attribute [rw] next_token - # A continuation token, if not all workflow names have been returned. + # @!attribute [rw] message + # A message describing the problem. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListWorkflowsResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/OperationTimeoutException AWS API Documentation # - class ListWorkflowsResponse < Struct.new( - :workflows, - :next_token) + class OperationTimeoutException < Struct.new( + :message) SENSITIVE = [] include Aws::Structure end - # The location of resources. + # Specifies an Oracle data source in the Glue Data Catalog. # - # @note When making an API call, you may pass Location + # @note When making an API call, you may pass OracleSQLCatalogSource # data as a hash: # # { - # jdbc: [ - # { - # name: "CodeGenArgName", # required - # value: "CodeGenArgValue", # required - # param: false, - # }, - # ], - # s3: [ - # { - # name: "CodeGenArgName", # required - # value: "CodeGenArgValue", # required - # param: false, - # }, - # ], - # dynamo_db: [ - # { - # name: "CodeGenArgName", # required - # value: "CodeGenArgValue", # required - # param: false, - # }, - # ], + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required # } # - # @!attribute [rw] jdbc - # A JDBC location. - # @return [Array] + # @!attribute [rw] name + # The name of the data source. + # @return [String] # - # @!attribute [rw] s3 - # An Amazon Simple Storage Service (Amazon S3) location. - # @return [Array] + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] # - # @!attribute [rw] dynamo_db - # An Amazon DynamoDB table location. - # @return [Array] + # @!attribute [rw] table + # The name of the table in the database to read from. + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Location AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/OracleSQLCatalogSource AWS API Documentation # - class Location < Struct.new( - :jdbc, - :s3, - :dynamo_db) + class OracleSQLCatalogSource < Struct.new( + :name, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # Defines column statistics supported for integer data columns. + # Specifies a target that uses Oracle SQL. 
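+ #
+ # As an illustrative sketch (the node and table names below are
+ # hypothetical), a target node of this kind could be expressed as:
+ #
+ #   {
+ #     name: "Write to Oracle",
+ #     inputs: ["node-1"],
+ #     database: "hr",
+ #     table: "employees",
+ #   }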
# - # @note When making an API call, you may pass LongColumnStatisticsData + # @note When making an API call, you may pass OracleSQLCatalogTarget # data as a hash: # # { - # minimum_value: 1, - # maximum_value: 1, - # number_of_nulls: 1, # required - # number_of_distinct_values: 1, # required + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required # } # - # @!attribute [rw] minimum_value - # The lowest value in the column. - # @return [Integer] + # @!attribute [rw] name + # The name of the data target. + # @return [String] # - # @!attribute [rw] maximum_value - # The highest value in the column. - # @return [Integer] + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] # - # @!attribute [rw] number_of_nulls - # The number of null values in the column. - # @return [Integer] + # @!attribute [rw] database + # The name of the database to write to. + # @return [String] # - # @!attribute [rw] number_of_distinct_values - # The number of distinct values in a column. - # @return [Integer] + # @!attribute [rw] table + # The name of the table in the database to write to. + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/LongColumnStatisticsData AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/OracleSQLCatalogTarget AWS API Documentation # - class LongColumnStatisticsData < Struct.new( - :minimum_value, - :maximum_value, - :number_of_nulls, - :number_of_distinct_values) + class OracleSQLCatalogTarget < Struct.new( + :name, + :inputs, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # A structure for a machine learning transform. + # Specifies the sort order of a sorted column. # - # @!attribute [rw] transform_id - # The unique transform ID that is generated for the machine learning - # transform. The ID is guaranteed to be unique and does not change. - # @return [String] + # @note When making an API call, you may pass Order + # data as a hash: # - # @!attribute [rw] name - # A user-defined name for the machine learning transform. Names are - # not guaranteed unique and can be changed at any time. + # { + # column: "NameString", # required + # sort_order: 1, # required + # } + # + # @!attribute [rw] column + # The name of the column. # @return [String] # - # @!attribute [rw] description - # A user-defined, long-form description text for the machine learning - # transform. Descriptions are not guaranteed to be unique and can be - # changed at any time. + # @!attribute [rw] sort_order + # Indicates that the column is sorted in ascending order (`== 1`), or + # in descending order (`==0`). + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Order AWS API Documentation + # + class Order < Struct.new( + :column, + :sort_order) + SENSITIVE = [] + include Aws::Structure + end + + # A structure containing other metadata for a schema version belonging + # to the same metadata key. + # + # @!attribute [rw] metadata_value + # The metadata key’s corresponding value for the other metadata + # belonging to the same metadata key. # @return [String] # - # @!attribute [rw] status - # The current status of the machine learning transform. + # @!attribute [rw] created_time + # The time at which the entry was created. # @return [String] # - # @!attribute [rw] created_on - # A timestamp. 
The time and date that this machine learning transform - # was created. - # @return [Time] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/OtherMetadataValueListItem AWS API Documentation # - # @!attribute [rw] last_modified_on - # A timestamp. The last point in time when this machine learning - # transform was modified. - # @return [Time] + class OtherMetadataValueListItem < Struct.new( + :metadata_value, + :created_time) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform that identifies, removes or masks PII data. # - # @!attribute [rw] input_record_tables - # A list of Glue table definitions used by the transform. - # @return [Array] + # @note When making an API call, you may pass PIIDetection + # data as a hash: # - # @!attribute [rw] parameters - # A `TransformParameters` object. You can use parameters to tune - # (customize) the behavior of the machine learning transform by - # specifying what data it learns from and your preference on various - # tradeoffs (such as precious vs. recall, or accuracy vs. cost). - # @return [Types::TransformParameters] + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # required + # output_column_name: "EnclosedInStringProperty", + # sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # } # - # @!attribute [rw] evaluation_metrics - # An `EvaluationMetrics` object. Evaluation metrics provide an - # estimate of the quality of your machine learning transform. - # @return [Types::EvaluationMetrics] + # @!attribute [rw] name + # The name of the transform node. + # @return [String] # - # @!attribute [rw] label_count - # A count identifier for the labeling files generated by Glue for this - # transform. As you create a better transform, you can iteratively - # download, label, and upload the labeling file. - # @return [Integer] + # @!attribute [rw] inputs + # The node ID inputs to the transform. + # @return [Array] # - # @!attribute [rw] schema - # A map of key-value pairs representing the columns and data types - # that this transform can run against. Has an upper bound of 100 - # columns. - # @return [Array] + # @!attribute [rw] pii_type + # Indicates the type of PIIDetection transform. + # @return [String] # - # @!attribute [rw] role - # The name or Amazon Resource Name (ARN) of the IAM role with the - # required permissions. The required permissions include both Glue - # service role permissions to Glue resources, and Amazon S3 - # permissions required by the transform. + # @!attribute [rw] entity_types_to_detect + # Indicates the types of entities the PIIDetection transform will + # identify as PII data. # - # * This role needs Glue service role permissions to allow access to - # resources in Glue. See [Attach a Policy to IAM Users That Access - # Glue][1]. 
+ # PII type entities include: PERSON\_NAME, DATE, USA\_SNN, EMAIL, + # USA\_ITIN, USA\_PASSPORT\_NUMBER, PHONE\_NUMBER, BANK\_ACCOUNT, + # IP\_ADDRESS, MAC\_ADDRESS, USA\_CPT\_CODE, USA\_HCPCS\_CODE, + # USA\_NATIONAL\_DRUG\_CODE, USA\_MEDICARE\_BENEFICIARY\_IDENTIFIER, + # USA\_HEALTH\_INSURANCE\_CLAIM\_NUMBER,CREDIT\_CARD,USA\_NATIONAL\_PROVIDER\_IDENTIFIER,USA\_DEA\_NUMBER,USA\_DRIVING\_LICENSE + # @return [Array] # - # * This role needs permission to your Amazon Simple Storage Service - # (Amazon S3) sources, targets, temporary directory, scripts, and - # any libraries used by the task run for this transform. + # @!attribute [rw] output_column_name + # Indicates the output column name that will contain any entity type + # detected in that row. + # @return [String] # + # @!attribute [rw] sample_fraction + # Indicates the fraction of the data to sample when scanning for PII + # entities. + # @return [Float] # + # @!attribute [rw] threshold_fraction + # Indicates the fraction of the data that must be met in order for a + # column to be identified as PII data. + # @return [Float] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html + # @!attribute [rw] mask_value + # Indicates the value that will replace the detected entity. # @return [String] # - # @!attribute [rw] glue_version - # This value determines which version of Glue this machine learning - # transform is compatible with. Glue 1.0 is recommended for most - # customers. If the value is not set, the Glue compatibility defaults - # to Glue 0.9. For more information, see [Glue Versions][1] in the - # developer guide. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PIIDetection AWS API Documentation + # + class PIIDetection < Struct.new( + :name, + :inputs, + :pii_type, + :entity_types_to_detect, + :output_column_name, + :sample_fraction, + :threshold_fraction, + :mask_value) + SENSITIVE = [] + include Aws::Structure + end + + # Represents a slice of table data. + # + # @!attribute [rw] values + # The values of the partition. + # @return [Array] + # + # @!attribute [rw] database_name + # The name of the catalog database in which to create the partition. + # @return [String] + # + # @!attribute [rw] table_name + # The name of the database table in which to create the partition. + # @return [String] # + # @!attribute [rw] creation_time + # The time at which the partition was created. + # @return [Time] # + # @!attribute [rw] last_access_time + # The last time at which the partition was accessed. + # @return [Time] # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions - # @return [String] + # @!attribute [rw] storage_descriptor + # Provides information about the physical location where the partition + # is stored. + # @return [Types::StorageDescriptor] # - # @!attribute [rw] max_capacity - # The number of Glue data processing units (DPUs) that are allocated - # to task runs for this transform. You can allocate from 2 to 100 - # DPUs; the default is 10. A DPU is a relative measure of processing - # power that consists of 4 vCPUs of compute capacity and 16 GB of - # memory. For more information, see the [Glue pricing page][1]. + # @!attribute [rw] parameters + # These key-value pairs define partition parameters. + # @return [Hash] # - # `MaxCapacity` is a mutually exclusive option with `NumberOfWorkers` - # and `WorkerType`. + # @!attribute [rw] last_analyzed_time + # The last time at which column statistics were computed for this + # partition. 
+ # @return [Time] # - # * If either `NumberOfWorkers` or `WorkerType` is set, then - # `MaxCapacity` cannot be set. + # @!attribute [rw] catalog_id + # The ID of the Data Catalog in which the partition resides. + # @return [String] # - # * If `MaxCapacity` is set then neither `NumberOfWorkers` or - # `WorkerType` can be set. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Partition AWS API Documentation # - # * If `WorkerType` is set, then `NumberOfWorkers` is required (and - # vice versa). + class Partition < Struct.new( + :values, + :database_name, + :table_name, + :creation_time, + :last_access_time, + :storage_descriptor, + :parameters, + :last_analyzed_time, + :catalog_id) + SENSITIVE = [] + include Aws::Structure + end + + # Contains information about a partition error. # - # * `MaxCapacity` and `NumberOfWorkers` must both be at least 1. + # @!attribute [rw] partition_values + # The values that define the partition. + # @return [Array] # - # When the `WorkerType` field is set to a value other than `Standard`, - # the `MaxCapacity` field is set automatically and becomes read-only. + # @!attribute [rw] error_detail + # The details about the partition error. + # @return [Types::ErrorDetail] # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionError AWS API Documentation # + class PartitionError < Struct.new( + :partition_values, + :error_detail) + SENSITIVE = [] + include Aws::Structure + end + + # A structure for a partition index. # - # [1]: http://aws.amazon.com/glue/pricing/ - # @return [Float] + # @note When making an API call, you may pass PartitionIndex + # data as a hash: # - # @!attribute [rw] worker_type - # The type of predefined worker that is allocated when a task of this - # transform runs. Accepts a value of Standard, G.1X, or G.2X. + # { + # keys: ["NameString"], # required + # index_name: "NameString", # required + # } # - # * For the `Standard` worker type, each worker provides 4 vCPU, 16 GB - # of memory and a 50GB disk, and 2 executors per worker. + # @!attribute [rw] keys + # The keys for the partition index. + # @return [Array] # - # * For the `G.1X` worker type, each worker provides 4 vCPU, 16 GB of - # memory and a 64GB disk, and 1 executor per worker. + # @!attribute [rw] index_name + # The name of the partition index. + # @return [String] # - # * For the `G.2X` worker type, each worker provides 8 vCPU, 32 GB of - # memory and a 128GB disk, and 1 executor per worker. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionIndex AWS API Documentation # - # `MaxCapacity` is a mutually exclusive option with `NumberOfWorkers` - # and `WorkerType`. + class PartitionIndex < Struct.new( + :keys, + :index_name) + SENSITIVE = [] + include Aws::Structure + end + + # A descriptor for a partition index in a table. # - # * If either `NumberOfWorkers` or `WorkerType` is set, then - # `MaxCapacity` cannot be set. + # @!attribute [rw] index_name + # The name of the partition index. + # @return [String] # - # * If `MaxCapacity` is set then neither `NumberOfWorkers` or - # `WorkerType` can be set. + # @!attribute [rw] keys + # A list of one or more keys, as `KeySchemaElement` structures, for + # the partition index. + # @return [Array] # - # * If `WorkerType` is set, then `NumberOfWorkers` is required (and - # vice versa). + # @!attribute [rw] index_status + # The status of the partition index. # - # * `MaxCapacity` and `NumberOfWorkers` must both be at least 1. 
- # @return [String] + # The possible statuses are: # - # @!attribute [rw] number_of_workers - # The number of workers of a defined `workerType` that are allocated - # when a task of the transform runs. + # * CREATING: The index is being created. When an index is in a + # CREATING state, the index or its table cannot be deleted. # - # If `WorkerType` is set, then `NumberOfWorkers` is required (and vice - # versa). - # @return [Integer] + # * ACTIVE: The index creation succeeds. # - # @!attribute [rw] timeout - # The timeout in minutes of the machine learning transform. - # @return [Integer] + # * FAILED: The index creation fails. # - # @!attribute [rw] max_retries - # The maximum number of times to retry after an `MLTaskRun` of the - # machine learning transform fails. - # @return [Integer] + # * DELETING: The index is deleted from the list of indexes. + # @return [String] # - # @!attribute [rw] transform_encryption - # The encryption-at-rest settings of the transform that apply to - # accessing user data. Machine learning transforms can access user - # data encrypted in Amazon S3 using KMS. - # @return [Types::TransformEncryption] + # @!attribute [rw] backfill_errors + # A list of errors that can occur when registering partition indexes + # for an existing table. + # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MLTransform AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionIndexDescriptor AWS API Documentation # - class MLTransform < Struct.new( - :transform_id, - :name, - :description, - :status, - :created_on, - :last_modified_on, - :input_record_tables, - :parameters, - :evaluation_metrics, - :label_count, - :schema, - :role, - :glue_version, - :max_capacity, - :worker_type, - :number_of_workers, - :timeout, - :max_retries, - :transform_encryption) + class PartitionIndexDescriptor < Struct.new( + :index_name, + :keys, + :index_status, + :backfill_errors) SENSITIVE = [] include Aws::Structure end - # The machine learning transform is not ready to run. + # The structure used to create and update a partition. 
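+ #
+ # As a minimal sketch (the database, table, and Amazon S3 location
+ # below are hypothetical), a `PartitionInput` hash might be passed to
+ # `Aws::Glue::Client#create_partition` like this:
+ #
+ #   glue = Aws::Glue::Client.new
+ #   glue.create_partition(
+ #     database_name: "sales_db",
+ #     table_name: "events",
+ #     partition_input: {
+ #       values: ["2022-05-17"],
+ #       storage_descriptor: {
+ #         location: "s3://DOC-EXAMPLE-BUCKET/events/dt=2022-05-17/",
+ #       },
+ #     },
+ #   )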
+ # + # @note When making an API call, you may pass PartitionInput + # data as a hash: + # + # { + # values: ["ValueString"], + # last_access_time: Time.now, + # storage_descriptor: { + # columns: [ + # { + # name: "NameString", # required + # type: "ColumnTypeString", + # comment: "CommentString", + # parameters: { + # "KeyString" => "ParametersMapValue", + # }, + # }, + # ], + # location: "LocationString", + # additional_locations: ["LocationString"], + # input_format: "FormatString", + # output_format: "FormatString", + # compressed: false, + # number_of_buckets: 1, + # serde_info: { + # name: "NameString", + # serialization_library: "NameString", + # parameters: { + # "KeyString" => "ParametersMapValue", + # }, + # }, + # bucket_columns: ["NameString"], + # sort_columns: [ + # { + # column: "NameString", # required + # sort_order: 1, # required + # }, + # ], + # parameters: { + # "KeyString" => "ParametersMapValue", + # }, + # skewed_info: { + # skewed_column_names: ["NameString"], + # skewed_column_values: ["ColumnValuesString"], + # skewed_column_value_location_maps: { + # "ColumnValuesString" => "ColumnValuesString", + # }, + # }, + # stored_as_sub_directories: false, + # schema_reference: { + # schema_id: { + # schema_arn: "GlueResourceArn", + # schema_name: "SchemaRegistryNameString", + # registry_name: "SchemaRegistryNameString", + # }, + # schema_version_id: "SchemaVersionIdString", + # schema_version_number: 1, + # }, + # }, + # parameters: { + # "KeyString" => "ParametersMapValue", + # }, + # last_analyzed_time: Time.now, + # } + # + # @!attribute [rw] values + # The values of the partition. Although this parameter is not required + # by the SDK, you must specify this parameter for a valid input. + # + # The values for the keys for the new partition must be passed as an + # array of String objects that must be ordered in the same order as + # the partition keys appearing in the Amazon S3 prefix. Otherwise Glue + # will add the values to the wrong keys. + # @return [Array] + # + # @!attribute [rw] last_access_time + # The last time at which the partition was accessed. + # @return [Time] # - # @!attribute [rw] message - # A message describing the problem. - # @return [String] + # @!attribute [rw] storage_descriptor + # Provides information about the physical location where the partition + # is stored. + # @return [Types::StorageDescriptor] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MLTransformNotReadyException AWS API Documentation + # @!attribute [rw] parameters + # These key-value pairs define partition parameters. + # @return [Hash] # - class MLTransformNotReadyException < Struct.new( - :message) + # @!attribute [rw] last_analyzed_time + # The last time at which column statistics were computed for this + # partition. + # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionInput AWS API Documentation + # + class PartitionInput < Struct.new( + :values, + :last_access_time, + :storage_descriptor, + :parameters, + :last_analyzed_time) SENSITIVE = [] include Aws::Structure end - # The encryption-at-rest settings of the transform that apply to - # accessing user data. + # Contains a list of values defining partitions. 
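+ #
+ # For example, each entry passed to `partitions_to_delete` in
+ # `Aws::Glue::Client#batch_delete_partition` is one of these lists;
+ # a hypothetical single-key partition would look like:
+ #
+ #   { values: ["2022-05-17"] }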
# - # @note When making an API call, you may pass MLUserDataEncryption + # @note When making an API call, you may pass PartitionValueList # data as a hash: # # { - # ml_user_data_encryption_mode: "DISABLED", # required, accepts DISABLED, SSE-KMS - # kms_key_id: "NameString", + # values: ["ValueString"], # required # } # - # @!attribute [rw] ml_user_data_encryption_mode - # The encryption mode applied to user data. Valid values are: - # - # * DISABLED: encryption is disabled + # @!attribute [rw] values + # The list of values. + # @return [Array] # - # * SSEKMS: use of server-side encryption with Key Management Service - # (SSE-KMS) for user data stored in Amazon S3. - # @return [String] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionValueList AWS API Documentation # - # @!attribute [rw] kms_key_id - # The ID for the customer-provided KMS key. + class PartitionValueList < Struct.new( + :values) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] message # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MLUserDataEncryption AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PermissionTypeMismatchException AWS API Documentation # - class MLUserDataEncryption < Struct.new( - :ml_user_data_encryption_mode, - :kms_key_id) + class PermissionTypeMismatchException < Struct.new( + :message) SENSITIVE = [] include Aws::Structure end - # Defines a mapping. + # Specifies the physical requirements for a connection. # - # @note When making an API call, you may pass MappingEntry + # @note When making an API call, you may pass PhysicalConnectionRequirements # data as a hash: # # { - # source_table: "TableName", - # source_path: "SchemaPathString", - # source_type: "FieldType", - # target_table: "TableName", - # target_path: "SchemaPathString", - # target_type: "FieldType", + # subnet_id: "NameString", + # security_group_id_list: ["NameString"], + # availability_zone: "NameString", # } # - # @!attribute [rw] source_table - # The name of the source table. - # @return [String] - # - # @!attribute [rw] source_path - # The source path. - # @return [String] - # - # @!attribute [rw] source_type - # The source type. - # @return [String] - # - # @!attribute [rw] target_table - # The target table. - # @return [String] - # - # @!attribute [rw] target_path - # The target path. - # @return [String] - # - # @!attribute [rw] target_type - # The target type. + # @!attribute [rw] subnet_id + # The subnet ID used by the connection. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MappingEntry AWS API Documentation - # - class MappingEntry < Struct.new( - :source_table, - :source_path, - :source_type, - :target_table, - :target_path, - :target_type) - SENSITIVE = [] - include Aws::Structure - end - - # A structure containing metadata information for a schema version. - # - # @!attribute [rw] metadata_value - # The metadata key’s corresponding value. - # @return [String] + # @!attribute [rw] security_group_id_list + # The security group ID list used by the connection. + # @return [Array] # - # @!attribute [rw] created_time - # The time at which the entry was created. + # @!attribute [rw] availability_zone + # The connection's Availability Zone. This field is redundant because + # the specified subnet implies the Availability Zone to be used. + # Currently the field must be populated, but it will be deprecated in + # the future. 
# @return [String]
#
- # @!attribute [rw] other_metadata_value_list
- # Other metadata belonging to the same metadata key.
- # @return [Array]
- #
- # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MetadataInfo AWS API Documentation
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PhysicalConnectionRequirements AWS API Documentation
#
- class MetadataInfo < Struct.new(
- :metadata_value,
- :created_time,
- :other_metadata_value_list)
+ class PhysicalConnectionRequirements < Struct.new(
+ :subnet_id,
+ :security_group_id_list,
+ :availability_zone)
SENSITIVE = []
include Aws::Structure
end
- # A structure containing a key value pair for metadata.
+ # Specifies a PostgreSQL data source in the Glue Data Catalog.
#
- # @note When making an API call, you may pass MetadataKeyValuePair
+ # @note When making an API call, you may pass PostgreSQLCatalogSource
# data as a hash:
#
# {
- # metadata_key: "MetadataKeyString",
- # metadata_value: "MetadataValueString",
+ # name: "NodeName", # required
+ # database: "EnclosedInStringProperty", # required
+ # table: "EnclosedInStringProperty", # required
# }
#
- # @!attribute [rw] metadata_key
- # A metadata key.
+ # @!attribute [rw] name
+ # The name of the data source.
# @return [String]
#
- # @!attribute [rw] metadata_value
- # A metadata key’s corresponding value.
+ # @!attribute [rw] database
+ # The name of the database to read from.
# @return [String]
#
- # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MetadataKeyValuePair AWS API Documentation
+ # @!attribute [rw] table
+ # The name of the table in the database to read from.
+ # @return [String]
#
- class MetadataKeyValuePair < Struct.new(
- :metadata_key,
- :metadata_value)
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PostgreSQLCatalogSource AWS API Documentation
+ #
+ class PostgreSQLCatalogSource < Struct.new(
+ :name,
+ :database,
+ :table)
SENSITIVE = []
include Aws::Structure
end
- # Specifies an Amazon DocumentDB or MongoDB data store to crawl.
+ # Specifies a target that uses PostgreSQL.
#
- # @note When making an API call, you may pass MongoDBTarget
+ # @note When making an API call, you may pass PostgreSQLCatalogTarget
# data as a hash:
#
# {
- # connection_name: "ConnectionName",
- # path: "Path",
- # scan_all: false,
+ # name: "NodeName", # required
+ # inputs: ["NodeId"], # required
+ # database: "EnclosedInStringProperty", # required
+ # table: "EnclosedInStringProperty", # required
# }
#
- # @!attribute [rw] connection_name
- # The name of the connection to use to connect to the Amazon
- # DocumentDB or MongoDB target.
+ # @!attribute [rw] name
+ # The name of the data target.
# @return [String]
#
- # @!attribute [rw] path
- # The path of the Amazon DocumentDB or MongoDB target
- # (database/collection).
- # @return [String]
+ # @!attribute [rw] inputs
+ # The nodes that are inputs to the data target.
+ # @return [Array]
#
- # @!attribute [rw] scan_all
- # Indicates whether to scan all the records, or to sample rows from
- # the table. Scanning all the records can take a long time when the
- # table is not a high throughput table.
+ # @!attribute [rw] database
+ # The name of the database to write to.
+ # @return [String]
#
- # A value of `true` means to scan all records, while a value of
- # `false` means to sample the records. If no value is specified, the
- # value defaults to `true`.
- # @return [Boolean]
+ # @!attribute [rw] table
+ # The name of the table in the database to write to.
+ # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/MongoDBTarget AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PostgreSQLCatalogTarget AWS API Documentation # - class MongoDBTarget < Struct.new( - :connection_name, - :path, - :scan_all) + class PostgreSQLCatalogTarget < Struct.new( + :name, + :inputs, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # There is no applicable schedule. + # A job run that was used in the predicate of a conditional trigger that + # triggered this job run. # - # @!attribute [rw] message - # A message describing the problem. + # @!attribute [rw] job_name + # The name of the job definition used by the predecessor job run. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/NoScheduleException AWS API Documentation + # @!attribute [rw] run_id + # The job-run ID of the predecessor job run. + # @return [String] # - class NoScheduleException < Struct.new( - :message) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predecessor AWS API Documentation + # + class Predecessor < Struct.new( + :job_name, + :run_id) SENSITIVE = [] include Aws::Structure end - # A node represents an Glue component (trigger, crawler, or job) on a - # workflow graph. + # Defines the predicate of the trigger, which determines when it fires. # - # @!attribute [rw] type - # The type of Glue component represented by the node. - # @return [String] + # @note When making an API call, you may pass Predicate + # data as a hash: # - # @!attribute [rw] name - # The name of the Glue component represented by the node. - # @return [String] + # { + # logical: "AND", # accepts AND, ANY + # conditions: [ + # { + # logical_operator: "EQUALS", # accepts EQUALS + # job_name: "NameString", + # state: "STARTING", # accepts STARTING, RUNNING, STOPPING, STOPPED, SUCCEEDED, FAILED, TIMEOUT + # crawler_name: "NameString", + # crawl_state: "RUNNING", # accepts RUNNING, CANCELLING, CANCELLED, SUCCEEDED, FAILED + # }, + # ], + # } # - # @!attribute [rw] unique_id - # The unique Id assigned to the node within the workflow. + # @!attribute [rw] logical + # An optional field if only one condition is listed. If multiple + # conditions are listed, then this field is required. # @return [String] # - # @!attribute [rw] trigger_details - # Details of the Trigger when the node represents a Trigger. - # @return [Types::TriggerNodeDetails] - # - # @!attribute [rw] job_details - # Details of the Job when the node represents a Job. - # @return [Types::JobNodeDetails] - # - # @!attribute [rw] crawler_details - # Details of the crawler when the node represents a crawler. - # @return [Types::CrawlerNodeDetails] + # @!attribute [rw] conditions + # A list of the conditions that determine when the trigger will fire. + # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Node AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predicate AWS API Documentation # - class Node < Struct.new( - :type, - :name, - :unique_id, - :trigger_details, - :job_details, - :crawler_details) + class Predicate < Struct.new( + :logical, + :conditions) SENSITIVE = [] include Aws::Structure end - # Specifies configuration properties of a notification. + # Permissions granted to a principal. 
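+ #
+ # For example (the role ARN below is hypothetical), granting SELECT
+ # to a single principal could be expressed as:
+ #
+ #   {
+ #     principal: { data_lake_principal_identifier: "arn:aws:iam::123456789012:role/analyst" },
+ #     permissions: ["SELECT"],
+ #   }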
# - # @note When making an API call, you may pass NotificationProperty + # @note When making an API call, you may pass PrincipalPermissions # data as a hash: # # { - # notify_delay_after: 1, + # principal: { + # data_lake_principal_identifier: "DataLakePrincipalString", + # }, + # permissions: ["ALL"], # accepts ALL, SELECT, ALTER, DROP, DELETE, INSERT, CREATE_DATABASE, CREATE_TABLE, DATA_LOCATION_ACCESS # } # - # @!attribute [rw] notify_delay_after - # After a job run starts, the number of minutes to wait before sending - # a job run delay notification. - # @return [Integer] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/NotificationProperty AWS API Documentation - # - class NotificationProperty < Struct.new( - :notify_delay_after) - SENSITIVE = [] - include Aws::Structure - end - - # The operation timed out. + # @!attribute [rw] principal + # The principal who is granted permissions. + # @return [Types::DataLakePrincipal] # - # @!attribute [rw] message - # A message describing the problem. - # @return [String] + # @!attribute [rw] permissions + # The permissions that are granted to the principal. + # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/OperationTimeoutException AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PrincipalPermissions AWS API Documentation # - class OperationTimeoutException < Struct.new( - :message) + class PrincipalPermissions < Struct.new( + :principal, + :permissions) SENSITIVE = [] include Aws::Structure end - # Specifies the sort order of a sorted column. + # Defines a property predicate. # - # @note When making an API call, you may pass Order + # @note When making an API call, you may pass PropertyPredicate # data as a hash: # # { - # column: "NameString", # required - # sort_order: 1, # required + # key: "ValueString", + # value: "ValueString", + # comparator: "EQUALS", # accepts EQUALS, GREATER_THAN, LESS_THAN, GREATER_THAN_EQUALS, LESS_THAN_EQUALS # } # - # @!attribute [rw] column - # The name of the column. + # @!attribute [rw] key + # The key of the property. # @return [String] # - # @!attribute [rw] sort_order - # Indicates that the column is sorted in ascending order (`== 1`), or - # in descending order (`==0`). - # @return [Integer] + # @!attribute [rw] value + # The value of the property. + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Order AWS API Documentation + # @!attribute [rw] comparator + # The comparator used to compare this property to others. + # @return [String] # - class Order < Struct.new( - :column, - :sort_order) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PropertyPredicate AWS API Documentation + # + class PropertyPredicate < Struct.new( + :key, + :value, + :comparator) SENSITIVE = [] include Aws::Structure end - # A structure containing other metadata for a schema version belonging - # to the same metadata key. + # @note When making an API call, you may pass PutDataCatalogEncryptionSettingsRequest + # data as a hash: # - # @!attribute [rw] metadata_value - # The metadata key’s corresponding value for the other metadata - # belonging to the same metadata key. 
- # @return [String] + # { + # catalog_id: "CatalogIdString", + # data_catalog_encryption_settings: { # required + # encryption_at_rest: { + # catalog_encryption_mode: "DISABLED", # required, accepts DISABLED, SSE-KMS + # sse_aws_kms_key_id: "NameString", + # }, + # connection_password_encryption: { + # return_connection_password_encrypted: false, # required + # aws_kms_key_id: "NameString", + # }, + # }, + # } # - # @!attribute [rw] created_time - # The time at which the entry was created. + # @!attribute [rw] catalog_id + # The ID of the Data Catalog to set the security configuration for. If + # none is provided, the Amazon Web Services account ID is used by + # default. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/OtherMetadataValueListItem AWS API Documentation + # @!attribute [rw] data_catalog_encryption_settings + # The security configuration to set. + # @return [Types::DataCatalogEncryptionSettings] # - class OtherMetadataValueListItem < Struct.new( - :metadata_value, - :created_time) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettingsRequest AWS API Documentation + # + class PutDataCatalogEncryptionSettingsRequest < Struct.new( + :catalog_id, + :data_catalog_encryption_settings) SENSITIVE = [] include Aws::Structure end - # Represents a slice of table data. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettingsResponse AWS API Documentation # - # @!attribute [rw] values - # The values of the partition. - # @return [Array] + class PutDataCatalogEncryptionSettingsResponse < Aws::EmptyStructure; end + + # @note When making an API call, you may pass PutResourcePolicyRequest + # data as a hash: # - # @!attribute [rw] database_name - # The name of the catalog database in which to create the partition. + # { + # policy_in_json: "PolicyJsonString", # required + # resource_arn: "GlueResourceArn", + # policy_hash_condition: "HashString", + # policy_exists_condition: "MUST_EXIST", # accepts MUST_EXIST, NOT_EXIST, NONE + # enable_hybrid: "TRUE", # accepts TRUE, FALSE + # } + # + # @!attribute [rw] policy_in_json + # Contains the policy document to set, in JSON format. # @return [String] # - # @!attribute [rw] table_name - # The name of the database table in which to create the partition. + # @!attribute [rw] resource_arn + # Do not use. For internal use only. # @return [String] # - # @!attribute [rw] creation_time - # The time at which the partition was created. - # @return [Time] + # @!attribute [rw] policy_hash_condition + # The hash value returned when the previous policy was set using + # `PutResourcePolicy`. Its purpose is to prevent concurrent + # modifications of a policy. Do not use this parameter if no previous + # policy has been set. + # @return [String] # - # @!attribute [rw] last_access_time - # The last time at which the partition was accessed. - # @return [Time] + # @!attribute [rw] policy_exists_condition + # A value of `MUST_EXIST` is used to update a policy. A value of + # `NOT_EXIST` is used to create a new policy. If a value of `NONE` or + # a null value is used, the call does not depend on the existence of a + # policy. + # @return [String] # - # @!attribute [rw] storage_descriptor - # Provides information about the physical location where the partition - # is stored. 
- # @return [Types::StorageDescriptor] + # @!attribute [rw] enable_hybrid + # If `'TRUE'`, indicates that you are using both methods to grant + # cross-account access to Data Catalog resources: # - # @!attribute [rw] parameters - # These key-value pairs define partition parameters. - # @return [Hash] + # * By directly updating the resource policy with `PutResourePolicy` # - # @!attribute [rw] last_analyzed_time - # The last time at which column statistics were computed for this - # partition. - # @return [Time] + # * By using the **Grant permissions** command on the Amazon Web + # Services Management Console. # - # @!attribute [rw] catalog_id - # The ID of the Data Catalog in which the partition resides. + # Must be set to `'TRUE'` if you have already used the Management + # Console to grant cross-account access, otherwise the call fails. + # Default is 'FALSE'. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Partition AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicyRequest AWS API Documentation # - class Partition < Struct.new( - :values, - :database_name, - :table_name, - :creation_time, - :last_access_time, - :storage_descriptor, - :parameters, - :last_analyzed_time, - :catalog_id) + class PutResourcePolicyRequest < Struct.new( + :policy_in_json, + :resource_arn, + :policy_hash_condition, + :policy_exists_condition, + :enable_hybrid) SENSITIVE = [] include Aws::Structure end - # Contains information about a partition error. + # @!attribute [rw] policy_hash + # A hash of the policy that has just been set. This must be included + # in a subsequent call that overwrites or updates this policy. + # @return [String] # - # @!attribute [rw] partition_values - # The values that define the partition. - # @return [Array] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicyResponse AWS API Documentation # - # @!attribute [rw] error_detail - # The details about the partition error. - # @return [Types::ErrorDetail] + class PutResourcePolicyResponse < Struct.new( + :policy_hash) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass PutSchemaVersionMetadataInput + # data as a hash: + # + # { + # schema_id: { + # schema_arn: "GlueResourceArn", + # schema_name: "SchemaRegistryNameString", + # registry_name: "SchemaRegistryNameString", + # }, + # schema_version_number: { + # latest_version: false, + # version_number: 1, + # }, + # schema_version_id: "SchemaVersionIdString", + # metadata_key_value: { # required + # metadata_key: "MetadataKeyString", + # metadata_value: "MetadataValueString", + # }, + # } + # + # @!attribute [rw] schema_id + # The unique ID for the schema. + # @return [Types::SchemaId] + # + # @!attribute [rw] schema_version_number + # The version number of the schema. + # @return [Types::SchemaVersionNumber] + # + # @!attribute [rw] schema_version_id + # The unique version ID of the schema version. + # @return [String] + # + # @!attribute [rw] metadata_key_value + # The metadata key's corresponding value. 
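# Illustrative usage sketch, not part of the generated code: setting a Data
# Catalog resource policy with the PutResourcePolicyRequest fields documented
# above. The policy file path is a placeholder; the returned policy_hash can
# be supplied as policy_hash_condition on a later update.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

resp = glue.put_resource_policy(
  policy_in_json: File.read("glue-resource-policy.json"), # placeholder path
  policy_exists_condition: "NOT_EXIST",                   # creating a new policy
  enable_hybrid: "TRUE"                                   # keep console-granted cross-account access
)
puts resp.policy_hash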
+ # @return [Types::MetadataKeyValuePair] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionError AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutSchemaVersionMetadataInput AWS API Documentation # - class PartitionError < Struct.new( - :partition_values, - :error_detail) + class PutSchemaVersionMetadataInput < Struct.new( + :schema_id, + :schema_version_number, + :schema_version_id, + :metadata_key_value) SENSITIVE = [] include Aws::Structure end - # A structure for a partition index. - # - # @note When making an API call, you may pass PartitionIndex - # data as a hash: - # - # { - # keys: ["NameString"], # required - # index_name: "NameString", # required - # } + # @!attribute [rw] schema_arn + # The Amazon Resource Name (ARN) for the schema. + # @return [String] # - # @!attribute [rw] keys - # The keys for the partition index. - # @return [Array] + # @!attribute [rw] schema_name + # The name for the schema. + # @return [String] # - # @!attribute [rw] index_name - # The name of the partition index. + # @!attribute [rw] registry_name + # The name for the registry. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionIndex AWS API Documentation + # @!attribute [rw] latest_version + # The latest version of the schema. + # @return [Boolean] # - class PartitionIndex < Struct.new( - :keys, - :index_name) - SENSITIVE = [] - include Aws::Structure - end - - # A descriptor for a partition index in a table. + # @!attribute [rw] version_number + # The version number of the schema. + # @return [Integer] # - # @!attribute [rw] index_name - # The name of the partition index. + # @!attribute [rw] schema_version_id + # The unique version ID of the schema version. # @return [String] # - # @!attribute [rw] keys - # A list of one or more keys, as `KeySchemaElement` structures, for - # the partition index. - # @return [Array] + # @!attribute [rw] metadata_key + # The metadata key. + # @return [String] # - # @!attribute [rw] index_status - # The status of the partition index. + # @!attribute [rw] metadata_value + # The value of the metadata key. + # @return [String] # - # The possible statuses are: + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutSchemaVersionMetadataResponse AWS API Documentation # - # * CREATING: The index is being created. When an index is in a - # CREATING state, the index or its table cannot be deleted. + class PutSchemaVersionMetadataResponse < Struct.new( + :schema_arn, + :schema_name, + :registry_name, + :latest_version, + :version_number, + :schema_version_id, + :metadata_key, + :metadata_value) + SENSITIVE = [] + include Aws::Structure + end + + # @note When making an API call, you may pass PutWorkflowRunPropertiesRequest + # data as a hash: # - # * ACTIVE: The index creation succeeds. + # { + # name: "NameString", # required + # run_id: "IdString", # required + # run_properties: { # required + # "IdString" => "GenericString", + # }, + # } # - # * FAILED: The index creation fails. + # @!attribute [rw] name + # Name of the workflow which was run. + # @return [String] # - # * DELETING: The index is deleted from the list of indexes. + # @!attribute [rw] run_id + # The ID of the workflow run for which the run properties should be + # updated. # @return [String] # - # @!attribute [rw] backfill_errors - # A list of errors that can occur when registering partition indexes - # for an existing table. 
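# Illustrative usage sketch, not part of the generated code: attaching a
# metadata key/value pair to a schema version via PutSchemaVersionMetadataInput
# as documented above. Schema and registry names are placeholders.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

glue.put_schema_version_metadata(
  schema_id: {
    schema_name: "orders-schema",      # placeholder schema name
    registry_name: "default-registry"  # placeholder registry name
  },
  schema_version_number: { version_number: 1 },
  metadata_key_value: {
    metadata_key: "deployed_by",
    metadata_value: "data-platform-team"
  }
)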
- # @return [Array] + # @!attribute [rw] run_properties + # The properties to put for the specified run. + # @return [Hash] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionIndexDescriptor AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunPropertiesRequest AWS API Documentation # - class PartitionIndexDescriptor < Struct.new( - :index_name, - :keys, - :index_status, - :backfill_errors) + class PutWorkflowRunPropertiesRequest < Struct.new( + :name, + :run_id, + :run_properties) SENSITIVE = [] include Aws::Structure end - # The structure used to create and update a partition. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunPropertiesResponse AWS API Documentation # - # @note When making an API call, you may pass PartitionInput + class PutWorkflowRunPropertiesResponse < Aws::EmptyStructure; end + + # @note When making an API call, you may pass QuerySchemaVersionMetadataInput # data as a hash: # # { - # values: ["ValueString"], - # last_access_time: Time.now, - # storage_descriptor: { - # columns: [ - # { - # name: "NameString", # required - # type: "ColumnTypeString", - # comment: "CommentString", - # parameters: { - # "KeyString" => "ParametersMapValue", - # }, - # }, - # ], - # location: "LocationString", - # additional_locations: ["LocationString"], - # input_format: "FormatString", - # output_format: "FormatString", - # compressed: false, - # number_of_buckets: 1, - # serde_info: { - # name: "NameString", - # serialization_library: "NameString", - # parameters: { - # "KeyString" => "ParametersMapValue", - # }, - # }, - # bucket_columns: ["NameString"], - # sort_columns: [ - # { - # column: "NameString", # required - # sort_order: 1, # required - # }, - # ], - # parameters: { - # "KeyString" => "ParametersMapValue", - # }, - # skewed_info: { - # skewed_column_names: ["NameString"], - # skewed_column_values: ["ColumnValuesString"], - # skewed_column_value_location_maps: { - # "ColumnValuesString" => "ColumnValuesString", - # }, - # }, - # stored_as_sub_directories: false, - # schema_reference: { - # schema_id: { - # schema_arn: "GlueResourceArn", - # schema_name: "SchemaRegistryNameString", - # registry_name: "SchemaRegistryNameString", - # }, - # schema_version_id: "SchemaVersionIdString", - # schema_version_number: 1, - # }, + # schema_id: { + # schema_arn: "GlueResourceArn", + # schema_name: "SchemaRegistryNameString", + # registry_name: "SchemaRegistryNameString", # }, - # parameters: { - # "KeyString" => "ParametersMapValue", + # schema_version_number: { + # latest_version: false, + # version_number: 1, # }, - # last_analyzed_time: Time.now, + # schema_version_id: "SchemaVersionIdString", + # metadata_list: [ + # { + # metadata_key: "MetadataKeyString", + # metadata_value: "MetadataValueString", + # }, + # ], + # max_results: 1, + # next_token: "SchemaRegistryTokenString", # } # - # @!attribute [rw] values - # The values of the partition. Although this parameter is not required - # by the SDK, you must specify this parameter for a valid input. + # @!attribute [rw] schema_id + # A wrapper structure that may contain the schema name and Amazon + # Resource Name (ARN). + # @return [Types::SchemaId] # - # The values for the keys for the new partition must be passed as an - # array of String objects that must be ordered in the same order as - # the partition keys appearing in the Amazon S3 prefix. Otherwise Glue - # will add the values to the wrong keys. 
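# Illustrative usage sketch, not part of the generated code: updating run
# properties on a workflow run using PutWorkflowRunPropertiesRequest as
# documented above. The workflow name and run ID are placeholders.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

glue.put_workflow_run_properties(
  name: "nightly-etl",            # placeholder workflow name
  run_id: "wr_0123456789abcdef",  # placeholder workflow run ID
  run_properties: { "target_partition" => "2022-05-17" }
)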
- # @return [Array] + # @!attribute [rw] schema_version_number + # The version number of the schema. + # @return [Types::SchemaVersionNumber] # - # @!attribute [rw] last_access_time - # The last time at which the partition was accessed. - # @return [Time] + # @!attribute [rw] schema_version_id + # The unique version ID of the schema version. + # @return [String] # - # @!attribute [rw] storage_descriptor - # Provides information about the physical location where the partition - # is stored. - # @return [Types::StorageDescriptor] + # @!attribute [rw] metadata_list + # Search key-value pairs for metadata, if they are not provided all + # the metadata information will be fetched. + # @return [Array] # - # @!attribute [rw] parameters - # These key-value pairs define partition parameters. - # @return [Hash] + # @!attribute [rw] max_results + # Maximum number of results required per page. If the value is not + # supplied, this will be defaulted to 25 per page. + # @return [Integer] + # + # @!attribute [rw] next_token + # A continuation token, if this is a continuation call. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/QuerySchemaVersionMetadataInput AWS API Documentation + # + class QuerySchemaVersionMetadataInput < Struct.new( + :schema_id, + :schema_version_number, + :schema_version_id, + :metadata_list, + :max_results, + :next_token) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] metadata_info_map + # A map of a metadata key and associated values. + # @return [Hash] + # + # @!attribute [rw] schema_version_id + # The unique version ID of the schema version. + # @return [String] # - # @!attribute [rw] last_analyzed_time - # The last time at which column statistics were computed for this - # partition. - # @return [Time] + # @!attribute [rw] next_token + # A continuation token for paginating the returned list of tokens, + # returned if the current segment of the list is not the last. + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionInput AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/QuerySchemaVersionMetadataResponse AWS API Documentation # - class PartitionInput < Struct.new( - :values, - :last_access_time, - :storage_descriptor, - :parameters, - :last_analyzed_time) + class QuerySchemaVersionMetadataResponse < Struct.new( + :metadata_info_map, + :schema_version_id, + :next_token) SENSITIVE = [] include Aws::Structure end - # Contains a list of values defining partitions. + # When crawling an Amazon S3 data source after the first crawl is + # complete, specifies whether to crawl the entire dataset again or to + # crawl only folders that were added since the last crawler run. For + # more information, see [Incremental Crawls in Glue][1] in the developer + # guide. # - # @note When making an API call, you may pass PartitionValueList + # + # + # [1]: https://docs.aws.amazon.com/glue/latest/dg/incremental-crawls.html + # + # @note When making an API call, you may pass RecrawlPolicy # data as a hash: # # { - # values: ["ValueString"], # required + # recrawl_behavior: "CRAWL_EVERYTHING", # accepts CRAWL_EVERYTHING, CRAWL_NEW_FOLDERS_ONLY, CRAWL_EVENT_MODE # } # - # @!attribute [rw] values - # The list of values. - # @return [Array] + # @!attribute [rw] recrawl_behavior + # Specifies whether to crawl the entire dataset again or to crawl only + # folders that were added since the last crawler run. 
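# Illustrative usage sketch, not part of the generated code: querying the
# metadata stored on the latest schema version, per the
# QuerySchemaVersionMetadataInput/Response shapes documented above.
# Schema and registry names are placeholders.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

resp = glue.query_schema_version_metadata(
  schema_id: { schema_name: "orders-schema", registry_name: "default-registry" },
  schema_version_number: { latest_version: true },
  max_results: 25
)
resp.metadata_info_map.each do |key, info|
  puts "#{key} => #{info.metadata_value} (created #{info.created_time})"
end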
# - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PartitionValueList AWS API Documentation + # A value of `CRAWL_EVERYTHING` specifies crawling the entire dataset + # again. # - class PartitionValueList < Struct.new( - :values) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] message + # A value of `CRAWL_NEW_FOLDERS_ONLY` specifies crawling only folders + # that were added since the last crawler run. + # + # A value of `CRAWL_EVENT_MODE` specifies crawling only the changes + # identified by Amazon S3 events. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PermissionTypeMismatchException AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RecrawlPolicy AWS API Documentation # - class PermissionTypeMismatchException < Struct.new( - :message) + class RecrawlPolicy < Struct.new( + :recrawl_behavior) SENSITIVE = [] include Aws::Structure end - # Specifies the physical requirements for a connection. + # Specifies an Amazon Redshift data store. # - # @note When making an API call, you may pass PhysicalConnectionRequirements + # @note When making an API call, you may pass RedshiftSource # data as a hash: # # { - # subnet_id: "NameString", - # security_group_id_list: ["NameString"], - # availability_zone: "NameString", + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", # } # - # @!attribute [rw] subnet_id - # The subnet ID used by the connection. + # @!attribute [rw] name + # The name of the Amazon Redshift data store. # @return [String] # - # @!attribute [rw] security_group_id_list - # The security group ID list used by the connection. - # @return [Array] - # - # @!attribute [rw] availability_zone - # The connection's Availability Zone. This field is redundant because - # the specified subnet implies the Availability Zone to be used. - # Currently the field must be populated, but it will be deprecated in - # the future. + # @!attribute [rw] database + # The database to read from. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PhysicalConnectionRequirements AWS API Documentation - # - class PhysicalConnectionRequirements < Struct.new( - :subnet_id, - :security_group_id_list, - :availability_zone) - SENSITIVE = [] - include Aws::Structure - end - - # A job run that was used in the predicate of a conditional trigger that - # triggered this job run. + # @!attribute [rw] table + # The database table to read from. + # @return [String] # - # @!attribute [rw] job_name - # The name of the job definition used by the predecessor job run. + # @!attribute [rw] redshift_tmp_dir + # The Amazon S3 path where temporary data can be staged when copying + # out of the database. # @return [String] # - # @!attribute [rw] run_id - # The job-run ID of the predecessor job run. + # @!attribute [rw] tmp_dir_iam_role + # The IAM role with permissions. 
# @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predecessor AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RedshiftSource AWS API Documentation # - class Predecessor < Struct.new( - :job_name, - :run_id) + class RedshiftSource < Struct.new( + :name, + :database, + :table, + :redshift_tmp_dir, + :tmp_dir_iam_role) SENSITIVE = [] include Aws::Structure end - # Defines the predicate of the trigger, which determines when it fires. + # Specifies a target that uses Amazon Redshift. # - # @note When making an API call, you may pass Predicate + # @note When making an API call, you may pass RedshiftTarget # data as a hash: # # { - # logical: "AND", # accepts AND, ANY - # conditions: [ - # { - # logical_operator: "EQUALS", # accepts EQUALS - # job_name: "NameString", - # state: "STARTING", # accepts STARTING, RUNNING, STOPPING, STOPPED, SUCCEEDED, FAILED, TIMEOUT - # crawler_name: "NameString", - # crawl_state: "RUNNING", # accepts RUNNING, CANCELLING, CANCELLED, SUCCEEDED, FAILED - # }, - # ], + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, # } # - # @!attribute [rw] logical - # An optional field if only one condition is listed. If multiple - # conditions are listed, then this field is required. + # @!attribute [rw] name + # The name of the data target. # @return [String] # - # @!attribute [rw] conditions - # A list of the conditions that determine when the trigger will fire. - # @return [Array] + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Predicate AWS API Documentation + # @!attribute [rw] database + # The name of the database to write to. + # @return [String] # - class Predicate < Struct.new( - :logical, - :conditions) + # @!attribute [rw] table + # The name of the table in the database to write to. + # @return [String] + # + # @!attribute [rw] redshift_tmp_dir + # The Amazon S3 path where temporary data can be staged when copying + # out of the database. + # @return [String] + # + # @!attribute [rw] tmp_dir_iam_role + # The IAM role with permissions. + # @return [String] + # + # @!attribute [rw] upsert_redshift_options + # The set of options to configure an upsert operation when writing to + # a Redshift target. + # @return [Types::UpsertRedshiftTargetOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RedshiftTarget AWS API Documentation + # + class RedshiftTarget < Struct.new( + :name, + :inputs, + :database, + :table, + :redshift_tmp_dir, + :tmp_dir_iam_role, + :upsert_redshift_options) SENSITIVE = [] include Aws::Structure end - # Permissions granted to a principal. 
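# Illustrative sketch, not part of the generated code: how the RedshiftSource
# and RedshiftTarget node shapes documented above could be wired into a visual
# job definition. The job name, IAM role, script location, node IDs, database
# and table names are placeholders, and code_gen_configuration_nodes is the
# assumed CreateJob parameter that carries these nodes.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

glue.create_job(
  name: "orders-to-redshift",                            # placeholder job name
  role: "arn:aws:iam::123456789012:role/GlueJobRole",    # placeholder role ARN
  command: { name: "glueetl", script_location: "s3://my-bucket/scripts/job.py" },
  glue_version: "3.0",
  code_gen_configuration_nodes: {
    "node-1" => {
      catalog_source: { name: "orders source", database: "sales_db", table: "orders" }
    },
    "node-2" => {
      redshift_target: {
        name: "orders target",
        inputs: ["node-1"],
        database: "analytics",
        table: "orders",
        redshift_tmp_dir: "s3://my-bucket/redshift-tmp/" # placeholder staging path
      }
    }
  }
)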
- # - # @note When making an API call, you may pass PrincipalPermissions + # @note When making an API call, you may pass RegisterSchemaVersionInput # data as a hash: # # { - # principal: { - # data_lake_principal_identifier: "DataLakePrincipalString", + # schema_id: { # required + # schema_arn: "GlueResourceArn", + # schema_name: "SchemaRegistryNameString", + # registry_name: "SchemaRegistryNameString", # }, - # permissions: ["ALL"], # accepts ALL, SELECT, ALTER, DROP, DELETE, INSERT, CREATE_DATABASE, CREATE_TABLE, DATA_LOCATION_ACCESS + # schema_definition: "SchemaDefinitionString", # required # } # - # @!attribute [rw] principal - # The principal who is granted permissions. - # @return [Types::DataLakePrincipal] + # @!attribute [rw] schema_id + # This is a wrapper structure to contain schema identity fields. The + # structure contains: # - # @!attribute [rw] permissions - # The permissions that are granted to the principal. - # @return [Array] + # * SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. + # Either `SchemaArn` or `SchemaName` and `RegistryName` has to be + # provided. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PrincipalPermissions AWS API Documentation + # * SchemaId$SchemaName: The name of the schema. Either `SchemaArn` or + # `SchemaName` and `RegistryName` has to be provided. + # @return [Types::SchemaId] # - class PrincipalPermissions < Struct.new( - :principal, - :permissions) + # @!attribute [rw] schema_definition + # The schema definition using the `DataFormat` setting for the + # `SchemaName`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegisterSchemaVersionInput AWS API Documentation + # + class RegisterSchemaVersionInput < Struct.new( + :schema_id, + :schema_definition) SENSITIVE = [] include Aws::Structure end - # Defines a property predicate. - # - # @note When making an API call, you may pass PropertyPredicate - # data as a hash: - # - # { - # key: "ValueString", - # value: "ValueString", - # comparator: "EQUALS", # accepts EQUALS, GREATER_THAN, LESS_THAN, GREATER_THAN_EQUALS, LESS_THAN_EQUALS - # } - # - # @!attribute [rw] key - # The key of the property. + # @!attribute [rw] schema_version_id + # The unique ID that represents the version of this schema. # @return [String] # - # @!attribute [rw] value - # The value of the property. - # @return [String] + # @!attribute [rw] version_number + # The version of this schema (for sync flow only, in case this is the + # first version). + # @return [Integer] # - # @!attribute [rw] comparator - # The comparator used to compare this property to others. + # @!attribute [rw] status + # The status of the schema version. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PropertyPredicate AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegisterSchemaVersionResponse AWS API Documentation # - class PropertyPredicate < Struct.new( - :key, - :value, - :comparator) + class RegisterSchemaVersionResponse < Struct.new( + :schema_version_id, + :version_number, + :status) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass PutDataCatalogEncryptionSettingsRequest + # A wrapper structure that may contain the registry name and Amazon + # Resource Name (ARN). 
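# Illustrative usage sketch, not part of the generated code: registering a new
# version of an existing schema with RegisterSchemaVersionInput as documented
# above. The schema/registry names and the Avro record definition are placeholders.
require "aws-sdk-glue"
require "json"

glue = Aws::Glue::Client.new(region: "us-east-1")

definition = {
  type: "record",
  name: "Order",
  fields: [{ name: "id", type: "string" }, { name: "amount", type: "double" }]
}.to_json

resp = glue.register_schema_version(
  schema_id: { schema_name: "orders-schema", registry_name: "default-registry" },
  schema_definition: definition
)
puts "#{resp.schema_version_id} v#{resp.version_number} (#{resp.status})"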
+ # + # @note When making an API call, you may pass RegistryId # data as a hash: # # { - # catalog_id: "CatalogIdString", - # data_catalog_encryption_settings: { # required - # encryption_at_rest: { - # catalog_encryption_mode: "DISABLED", # required, accepts DISABLED, SSE-KMS - # sse_aws_kms_key_id: "NameString", - # }, - # connection_password_encryption: { - # return_connection_password_encrypted: false, # required - # aws_kms_key_id: "NameString", - # }, - # }, + # registry_name: "SchemaRegistryNameString", + # registry_arn: "GlueResourceArn", # } # - # @!attribute [rw] catalog_id - # The ID of the Data Catalog to set the security configuration for. If - # none is provided, the Amazon Web Services account ID is used by - # default. + # @!attribute [rw] registry_name + # Name of the registry. Used only for lookup. One of `RegistryArn` or + # `RegistryName` has to be provided. # @return [String] # - # @!attribute [rw] data_catalog_encryption_settings - # The security configuration to set. - # @return [Types::DataCatalogEncryptionSettings] + # @!attribute [rw] registry_arn + # Arn of the registry to be updated. One of `RegistryArn` or + # `RegistryName` has to be provided. + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettingsRequest AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegistryId AWS API Documentation # - class PutDataCatalogEncryptionSettingsRequest < Struct.new( - :catalog_id, - :data_catalog_encryption_settings) + class RegistryId < Struct.new( + :registry_name, + :registry_arn) SENSITIVE = [] include Aws::Structure end - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutDataCatalogEncryptionSettingsResponse AWS API Documentation - # - class PutDataCatalogEncryptionSettingsResponse < Aws::EmptyStructure; end - - # @note When making an API call, you may pass PutResourcePolicyRequest - # data as a hash: - # - # { - # policy_in_json: "PolicyJsonString", # required - # resource_arn: "GlueResourceArn", - # policy_hash_condition: "HashString", - # policy_exists_condition: "MUST_EXIST", # accepts MUST_EXIST, NOT_EXIST, NONE - # enable_hybrid: "TRUE", # accepts TRUE, FALSE - # } + # A structure containing the details for a registry. # - # @!attribute [rw] policy_in_json - # Contains the policy document to set, in JSON format. + # @!attribute [rw] registry_name + # The name of the registry. # @return [String] # - # @!attribute [rw] resource_arn - # Do not use. For internal use only. + # @!attribute [rw] registry_arn + # The Amazon Resource Name (ARN) of the registry. # @return [String] # - # @!attribute [rw] policy_hash_condition - # The hash value returned when the previous policy was set using - # `PutResourcePolicy`. Its purpose is to prevent concurrent - # modifications of a policy. Do not use this parameter if no previous - # policy has been set. + # @!attribute [rw] description + # A description of the registry. # @return [String] # - # @!attribute [rw] policy_exists_condition - # A value of `MUST_EXIST` is used to update a policy. A value of - # `NOT_EXIST` is used to create a new policy. If a value of `NONE` or - # a null value is used, the call does not depend on the existence of a - # policy. + # @!attribute [rw] status + # The status of the registry. 
# @return [String] # - # @!attribute [rw] enable_hybrid - # If `'TRUE'`, indicates that you are using both methods to grant - # cross-account access to Data Catalog resources: - # - # * By directly updating the resource policy with `PutResourePolicy` - # - # * By using the **Grant permissions** command on the Amazon Web - # Services Management Console. + # @!attribute [rw] created_time + # The data the registry was created. + # @return [String] # - # Must be set to `'TRUE'` if you have already used the Management - # Console to grant cross-account access, otherwise the call fails. - # Default is 'FALSE'. + # @!attribute [rw] updated_time + # The date the registry was updated. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicyRequest AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegistryListItem AWS API Documentation # - class PutResourcePolicyRequest < Struct.new( - :policy_in_json, - :resource_arn, - :policy_hash_condition, - :policy_exists_condition, - :enable_hybrid) + class RegistryListItem < Struct.new( + :registry_name, + :registry_arn, + :description, + :status, + :created_time, + :updated_time) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] policy_hash - # A hash of the policy that has just been set. This must be included - # in a subsequent call that overwrites or updates this policy. + # Specifies a Relational database data source in the Glue Data Catalog. + # + # @note When making an API call, you may pass RelationalCatalogSource + # data as a hash: + # + # { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # } + # + # @!attribute [rw] name + # The name of the data source. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutResourcePolicyResponse AWS API Documentation + # @!attribute [rw] database + # The name of the database to read from. + # @return [String] # - class PutResourcePolicyResponse < Struct.new( - :policy_hash) + # @!attribute [rw] table + # The name of the table in the database to read from. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RelationalCatalogSource AWS API Documentation + # + class RelationalCatalogSource < Struct.new( + :name, + :database, + :table) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass PutSchemaVersionMetadataInput + # @note When making an API call, you may pass RemoveSchemaVersionMetadataInput # data as a hash: # # { @@ -14143,7 +19352,8 @@ class PutResourcePolicyResponse < Struct.new( # } # # @!attribute [rw] schema_id - # The unique ID for the schema. + # A wrapper structure that may contain the schema name and Amazon + # Resource Name (ARN). # @return [Types::SchemaId] # # @!attribute [rw] schema_version_number @@ -14155,12 +19365,12 @@ class PutResourcePolicyResponse < Struct.new( # @return [String] # # @!attribute [rw] metadata_key_value - # The metadata key's corresponding value. + # The value of the metadata key. 
# @return [Types::MetadataKeyValuePair] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutSchemaVersionMetadataInput AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RemoveSchemaVersionMetadataInput AWS API Documentation # - class PutSchemaVersionMetadataInput < Struct.new( + class RemoveSchemaVersionMetadataInput < Struct.new( :schema_id, :schema_version_number, :schema_version_id, @@ -14170,15 +19380,15 @@ class PutSchemaVersionMetadataInput < Struct.new( end # @!attribute [rw] schema_arn - # The Amazon Resource Name (ARN) for the schema. + # The Amazon Resource Name (ARN) of the schema. # @return [String] # # @!attribute [rw] schema_name - # The name for the schema. + # The name of the schema. # @return [String] # # @!attribute [rw] registry_name - # The name for the registry. + # The name of the registry. # @return [String] # # @!attribute [rw] latest_version @@ -14190,7 +19400,7 @@ class PutSchemaVersionMetadataInput < Struct.new( # @return [Integer] # # @!attribute [rw] schema_version_id - # The unique version ID of the schema version. + # The version ID for the schema version. # @return [String] # # @!attribute [rw] metadata_key @@ -14201,9 +19411,9 @@ class PutSchemaVersionMetadataInput < Struct.new( # The value of the metadata key. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutSchemaVersionMetadataResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RemoveSchemaVersionMetadataResponse AWS API Documentation # - class PutSchemaVersionMetadataResponse < Struct.new( + class RemoveSchemaVersionMetadataResponse < Struct.new( :schema_arn, :schema_name, :registry_name, @@ -14216,606 +19426,978 @@ class PutSchemaVersionMetadataResponse < Struct.new( include Aws::Structure end - # @note When making an API call, you may pass PutWorkflowRunPropertiesRequest + # Specifies a transform that renames a single data property key. + # + # @note When making an API call, you may pass RenameField # data as a hash: # # { - # name: "NameString", # required - # run_id: "IdString", # required - # run_properties: { # required - # "IdString" => "GenericString", - # }, + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source_path: ["EnclosedInStringProperty"], # required + # target_path: ["EnclosedInStringProperty"], # required # } # # @!attribute [rw] name - # Name of the workflow which was run. + # The name of the transform node. # @return [String] # - # @!attribute [rw] run_id - # The ID of the workflow run for which the run properties should be - # updated. - # @return [String] + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] # - # @!attribute [rw] run_properties - # The properties to put for the specified run. - # @return [Hash] + # @!attribute [rw] source_path + # A JSON path to a variable in the data structure for the source data. + # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunPropertiesRequest AWS API Documentation + # @!attribute [rw] target_path + # A JSON path to a variable in the data structure for the target data. 
+ # @return [Array] # - class PutWorkflowRunPropertiesRequest < Struct.new( + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RenameField AWS API Documentation + # + class RenameField < Struct.new( :name, - :run_id, - :run_properties) + :inputs, + :source_path, + :target_path) SENSITIVE = [] include Aws::Structure end - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunPropertiesResponse AWS API Documentation - # - class PutWorkflowRunPropertiesResponse < Aws::EmptyStructure; end - - # @note When making an API call, you may pass QuerySchemaVersionMetadataInput + # @note When making an API call, you may pass ResetJobBookmarkRequest # data as a hash: # # { - # schema_id: { - # schema_arn: "GlueResourceArn", - # schema_name: "SchemaRegistryNameString", - # registry_name: "SchemaRegistryNameString", - # }, - # schema_version_number: { - # latest_version: false, - # version_number: 1, - # }, - # schema_version_id: "SchemaVersionIdString", - # metadata_list: [ - # { - # metadata_key: "MetadataKeyString", - # metadata_value: "MetadataValueString", - # }, - # ], - # max_results: 1, - # next_token: "SchemaRegistryTokenString", + # job_name: "JobName", # required + # run_id: "RunId", # } # - # @!attribute [rw] schema_id - # A wrapper structure that may contain the schema name and Amazon - # Resource Name (ARN). - # @return [Types::SchemaId] + # @!attribute [rw] job_name + # The name of the job in question. + # @return [String] # - # @!attribute [rw] schema_version_number - # The version number of the schema. - # @return [Types::SchemaVersionNumber] + # @!attribute [rw] run_id + # The unique run identifier associated with this job run. + # @return [String] # - # @!attribute [rw] schema_version_id - # The unique version ID of the schema version. + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmarkRequest AWS API Documentation + # + class ResetJobBookmarkRequest < Struct.new( + :job_name, + :run_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] job_bookmark_entry + # The reset bookmark entry. + # @return [Types::JobBookmarkEntry] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmarkResponse AWS API Documentation + # + class ResetJobBookmarkResponse < Struct.new( + :job_bookmark_entry) + SENSITIVE = [] + include Aws::Structure + end + + # A resource was not ready for a transaction. + # + # @!attribute [rw] message + # A message describing the problem. # @return [String] # - # @!attribute [rw] metadata_list - # Search key-value pairs for metadata, if they are not provided all - # the metadata information will be fetched. - # @return [Array] + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceNotReadyException AWS API Documentation # - # @!attribute [rw] max_results - # Maximum number of results required per page. If the value is not - # supplied, this will be defaulted to 25 per page. - # @return [Integer] + class ResourceNotReadyException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + + # A resource numerical limit was exceeded. # - # @!attribute [rw] next_token - # A continuation token, if this is a continuation call. + # @!attribute [rw] message + # A message describing the problem. 
# @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/QuerySchemaVersionMetadataInput AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceNumberLimitExceededException AWS API Documentation # - class QuerySchemaVersionMetadataInput < Struct.new( - :schema_id, - :schema_version_number, - :schema_version_id, - :metadata_list, - :max_results, - :next_token) + class ResourceNumberLimitExceededException < Struct.new( + :message) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] metadata_info_map - # A map of a metadata key and associated values. - # @return [Hash] + # The URIs for function resources. # - # @!attribute [rw] schema_version_id - # The unique version ID of the schema version. + # @note When making an API call, you may pass ResourceUri + # data as a hash: + # + # { + # resource_type: "JAR", # accepts JAR, FILE, ARCHIVE + # uri: "URI", + # } + # + # @!attribute [rw] resource_type + # The type of the resource. # @return [String] # - # @!attribute [rw] next_token - # A continuation token for paginating the returned list of tokens, - # returned if the current segment of the list is not the last. + # @!attribute [rw] uri + # The URI for accessing the resource. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/QuerySchemaVersionMetadataResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceUri AWS API Documentation # - class QuerySchemaVersionMetadataResponse < Struct.new( - :metadata_info_map, - :schema_version_id, - :next_token) + class ResourceUri < Struct.new( + :resource_type, + :uri) SENSITIVE = [] include Aws::Structure end - # When crawling an Amazon S3 data source after the first crawl is - # complete, specifies whether to crawl the entire dataset again or to - # crawl only folders that were added since the last crawler run. For - # more information, see [Incremental Crawls in Glue][1] in the developer - # guide. - # - # - # - # [1]: https://docs.aws.amazon.com/glue/latest/dg/incremental-crawls.html - # - # @note When making an API call, you may pass RecrawlPolicy + # @note When making an API call, you may pass ResumeWorkflowRunRequest # data as a hash: # # { - # recrawl_behavior: "CRAWL_EVERYTHING", # accepts CRAWL_EVERYTHING, CRAWL_NEW_FOLDERS_ONLY, CRAWL_EVENT_MODE + # name: "NameString", # required + # run_id: "IdString", # required + # node_ids: ["NameString"], # required # } # - # @!attribute [rw] recrawl_behavior - # Specifies whether to crawl the entire dataset again or to crawl only - # folders that were added since the last crawler run. + # @!attribute [rw] name + # The name of the workflow to resume. + # @return [String] # - # A value of `CRAWL_EVERYTHING` specifies crawling the entire dataset - # again. + # @!attribute [rw] run_id + # The ID of the workflow run to resume. + # @return [String] # - # A value of `CRAWL_NEW_FOLDERS_ONLY` specifies crawling only folders - # that were added since the last crawler run. + # @!attribute [rw] node_ids + # A list of the node IDs for the nodes you want to restart. The nodes + # that are to be restarted must have a run attempt in the original + # run. + # @return [Array] # - # A value of `CRAWL_EVENT_MODE` specifies crawling only the changes - # identified by Amazon S3 events. 
+ # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRunRequest AWS API Documentation + # + class ResumeWorkflowRunRequest < Struct.new( + :name, + :run_id, + :node_ids) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] run_id + # The new ID assigned to the resumed workflow run. Each resume of a + # workflow run will have a new run ID. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RecrawlPolicy AWS API Documentation + # @!attribute [rw] node_ids + # A list of the node IDs for the nodes that were actually restarted. + # @return [Array] # - class RecrawlPolicy < Struct.new( - :recrawl_behavior) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRunResponse AWS API Documentation + # + class ResumeWorkflowRunResponse < Struct.new( + :run_id, + :node_ids) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass RegisterSchemaVersionInput + # @note When making an API call, you may pass RunStatementRequest # data as a hash: # # { - # schema_id: { # required - # schema_arn: "GlueResourceArn", - # schema_name: "SchemaRegistryNameString", - # registry_name: "SchemaRegistryNameString", - # }, - # schema_definition: "SchemaDefinitionString", # required + # session_id: "NameString", # required + # code: "OrchestrationStatementCodeString", # required + # request_origin: "OrchestrationNameString", # } # - # @!attribute [rw] schema_id - # This is a wrapper structure to contain schema identity fields. The - # structure contains: - # - # * SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. - # Either `SchemaArn` or `SchemaName` and `RegistryName` has to be - # provided. + # @!attribute [rw] session_id + # The Session Id of the statement to be run. + # @return [String] # - # * SchemaId$SchemaName: The name of the schema. Either `SchemaArn` or - # `SchemaName` and `RegistryName` has to be provided. - # @return [Types::SchemaId] + # @!attribute [rw] code + # The statement code to be run. + # @return [String] # - # @!attribute [rw] schema_definition - # The schema definition using the `DataFormat` setting for the - # `SchemaName`. + # @!attribute [rw] request_origin + # The origin of the request. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegisterSchemaVersionInput AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RunStatementRequest AWS API Documentation # - class RegisterSchemaVersionInput < Struct.new( - :schema_id, - :schema_definition) + class RunStatementRequest < Struct.new( + :session_id, + :code, + :request_origin) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] schema_version_id - # The unique ID that represents the version of this schema. - # @return [String] - # - # @!attribute [rw] version_number - # The version of this schema (for sync flow only, in case this is the - # first version). + # @!attribute [rw] id + # Returns the Id of the statement that was run. # @return [Integer] # - # @!attribute [rw] status - # The status of the schema version. 
- # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegisterSchemaVersionResponse AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RunStatementResponse AWS API Documentation # - class RegisterSchemaVersionResponse < Struct.new( - :schema_version_id, - :version_number, - :status) + class RunStatementResponse < Struct.new( + :id) SENSITIVE = [] include Aws::Structure end - # A wrapper structure that may contain the registry name and Amazon - # Resource Name (ARN). + # Specifies an Amazon S3 data store in the Glue Data Catalog. # - # @note When making an API call, you may pass RegistryId + # @note When making an API call, you may pass S3CatalogSource # data as a hash: # # { - # registry_name: "SchemaRegistryNameString", - # registry_arn: "GlueResourceArn", + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, # } # - # @!attribute [rw] registry_name - # Name of the registry. Used only for lookup. One of `RegistryArn` or - # `RegistryName` has to be provided. + # @!attribute [rw] name + # The name of the data store. # @return [String] # - # @!attribute [rw] registry_arn - # Arn of the registry to be updated. One of `RegistryArn` or - # `RegistryName` has to be provided. + # @!attribute [rw] database + # The database to read from. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegistryId AWS API Documentation + # @!attribute [rw] table + # The database table to read from. + # @return [String] # - class RegistryId < Struct.new( - :registry_name, - :registry_arn) + # @!attribute [rw] partition_predicate + # Partitions satisfying this predicate are deleted. Files within the + # retention period in these partitions are not deleted. Set to `""` – + # empty by default. + # @return [String] + # + # @!attribute [rw] additional_options + # Specifies additional connection options. + # @return [Types::S3SourceAdditionalOptions] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3CatalogSource AWS API Documentation + # + class S3CatalogSource < Struct.new( + :name, + :database, + :table, + :partition_predicate, + :additional_options) SENSITIVE = [] include Aws::Structure end - # A structure containing the details for a registry. + # Specifies a data target that writes to Amazon S3 using the Glue Data + # Catalog. # - # @!attribute [rw] registry_name - # The name of the registry. - # @return [String] + # @note When making an API call, you may pass S3CatalogTarget + # data as a hash: # - # @!attribute [rw] registry_arn - # The Amazon Resource Name (ARN) of the registry. - # @return [String] + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # } # - # @!attribute [rw] description - # A description of the registry. + # @!attribute [rw] name + # The name of the data target. # @return [String] # - # @!attribute [rw] status - # The status of the registry. 
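# Illustrative usage sketch, not part of the generated code: running a code
# statement in an existing interactive session, per the RunStatementRequest
# and RunStatementResponse shapes documented above. The session ID is a
# placeholder and the session is assumed to already be in a READY state.
require "aws-sdk-glue"

glue = Aws::Glue::Client.new(region: "us-east-1")

resp = glue.run_statement(
  session_id: "my-interactive-session",            # placeholder session ID
  code: "df = spark.range(10); print(df.count())"  # code executed inside the session
)
puts "statement id: #{resp.id}"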
- # @return [String] + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] # - # @!attribute [rw] created_time - # The data the registry was created. + # @!attribute [rw] partition_keys + # Specifies native partitioning using a sequence of keys. + # @return [Array>] + # + # @!attribute [rw] table + # The name of the table in the database to write to. # @return [String] # - # @!attribute [rw] updated_time - # The date the registry was updated. + # @!attribute [rw] database + # The name of the database to write to. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RegistryListItem AWS API Documentation + # @!attribute [rw] schema_change_policy + # A policy that specifies update behavior for the crawler. + # @return [Types::CatalogSchemaChangePolicy] # - class RegistryListItem < Struct.new( - :registry_name, - :registry_arn, - :description, - :status, - :created_time, - :updated_time) + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3CatalogTarget AWS API Documentation + # + class S3CatalogTarget < Struct.new( + :name, + :inputs, + :partition_keys, + :table, + :database, + :schema_change_policy) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass RemoveSchemaVersionMetadataInput + # Specifies a command-separated value (CSV) data store stored in Amazon + # S3. + # + # @note When making an API call, you may pass S3CsvSource # data as a hash: # # { - # schema_id: { - # schema_arn: "GlueResourceArn", - # schema_name: "SchemaRegistryNameString", - # registry_name: "SchemaRegistryNameString", - # }, - # schema_version_number: { - # latest_version: false, - # version_number: 1, - # }, - # schema_version_id: "SchemaVersionIdString", - # metadata_key_value: { # required - # metadata_key: "MetadataKeyString", - # metadata_value: "MetadataValueString", + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], # } # - # @!attribute [rw] schema_id - # A wrapper structure that may contain the schema name and Amazon - # Resource Name (ARN). - # @return [Types::SchemaId] - # - # @!attribute [rw] schema_version_number - # The version number of the schema. - # @return [Types::SchemaVersionNumber] - # - # @!attribute [rw] schema_version_id - # The unique version ID of the schema version. + # @!attribute [rw] name + # The name of the data store. # @return [String] # - # @!attribute [rw] metadata_key_value - # The value of the metadata key. 
- # @return [Types::MetadataKeyValuePair] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RemoveSchemaVersionMetadataInput AWS API Documentation + # @!attribute [rw] paths + # A list of the Amazon S3 paths to read from. + # @return [Array] # - class RemoveSchemaVersionMetadataInput < Struct.new( - :schema_id, - :schema_version_number, - :schema_version_id, - :metadata_key_value) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] schema_arn - # The Amazon Resource Name (ARN) of the schema. + # @!attribute [rw] compression_type + # Specifies how the data is compressed. This is generally not + # necessary if the data has a standard file extension. Possible values + # are `"gzip"` and `"bzip"`). # @return [String] # - # @!attribute [rw] schema_name - # The name of the schema. + # @!attribute [rw] exclusions + # A string containing a JSON list of Unix-style glob patterns to + # exclude. For example, "\[\\"**.pdf\\"\]" excludes all PDF + # files. + # @return [Array] + # + # @!attribute [rw] group_size + # The target group size in bytes. The default is computed based on the + # input data size and the size of your cluster. When there are fewer + # than 50,000 input files, `"groupFiles"` must be set to + # `"inPartition"` for this to take effect. # @return [String] # - # @!attribute [rw] registry_name - # The name of the registry. + # @!attribute [rw] group_files + # Grouping files is turned on by default when the input contains more + # than 50,000 files. To turn on grouping with fewer than 50,000 files, + # set this parameter to "inPartition". To disable grouping when + # there are more than 50,000 files, set this parameter to `"none"`. # @return [String] # - # @!attribute [rw] latest_version - # The latest version of the schema. + # @!attribute [rw] recurse + # If set to true, recursively reads files in all subdirectories under + # the specified paths. # @return [Boolean] # - # @!attribute [rw] version_number - # The version number of the schema. + # @!attribute [rw] max_band + # This option controls the duration in milliseconds after which the s3 + # listing is likely to be consistent. Files with modification + # timestamps falling within the last maxBand milliseconds are tracked + # specially when using JobBookmarks to account for Amazon S3 eventual + # consistency. Most users don't need to set this option. The default + # is 900000 milliseconds, or 15 minutes. # @return [Integer] # - # @!attribute [rw] schema_version_id - # The version ID for the schema version. + # @!attribute [rw] max_files_in_band + # This option specifies the maximum number of files to save from the + # last maxBand seconds. If this number is exceeded, extra files are + # skipped and only processed in the next job run. + # @return [Integer] + # + # @!attribute [rw] additional_options + # Specifies additional connection options. + # @return [Types::S3DirectSourceAdditionalOptions] + # + # @!attribute [rw] separator + # Specifies the delimiter character. The default is a comma: ",", + # but any other character can be specified. # @return [String] # - # @!attribute [rw] metadata_key - # The metadata key. + # @!attribute [rw] escaper + # Specifies a character to use for escaping. This option is used only + # when reading CSV files. The default value is `none`. If enabled, the + # character which immediately follows is used as-is, except for a + # small set of well-known escapes (`\n`, `\r`, `\t`, and `\0`). 
# @return [String] # - # @!attribute [rw] metadata_value - # The value of the metadata key. + # @!attribute [rw] quote_char + # Specifies the character to use for quoting. The default is a double + # quote: `'"'`. Set this to `-1` to turn off quoting entirely. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RemoveSchemaVersionMetadataResponse AWS API Documentation + # @!attribute [rw] multiline + # A Boolean value that specifies whether a single record can span + # multiple lines. This can occur when a field contains a quoted + # new-line character. You must set this option to True if any record + # spans multiple lines. The default value is `False`, which allows for + # more aggressive file-splitting during parsing. + # @return [Boolean] # - class RemoveSchemaVersionMetadataResponse < Struct.new( - :schema_arn, - :schema_name, - :registry_name, - :latest_version, - :version_number, - :schema_version_id, - :metadata_key, - :metadata_value) + # @!attribute [rw] with_header + # A Boolean value that specifies whether to treat the first line as a + # header. The default value is `False`. + # @return [Boolean] + # + # @!attribute [rw] write_header + # A Boolean value that specifies whether to write the header to + # output. The default value is `True`. + # @return [Boolean] + # + # @!attribute [rw] skip_first + # A Boolean value that specifies whether to skip the first data line. + # The default value is `False`. + # @return [Boolean] + # + # @!attribute [rw] optimize_performance + # A Boolean value that specifies whether to use the advanced SIMD CSV + # reader along with Apache Arrow based columnar memory formats. Only + # available in Glue version 3.0. + # @return [Boolean] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the S3 CSV source. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3CsvSource AWS API Documentation + # + class S3CsvSource < Struct.new( + :name, + :paths, + :compression_type, + :exclusions, + :group_size, + :group_files, + :recurse, + :max_band, + :max_files_in_band, + :additional_options, + :separator, + :escaper, + :quote_char, + :multiline, + :with_header, + :write_header, + :skip_first, + :optimize_performance, + :output_schemas) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ResetJobBookmarkRequest + # Specifies additional connection options for the Amazon S3 data store. + # + # @note When making an API call, you may pass S3DirectSourceAdditionalOptions # data as a hash: # # { - # job_name: "JobName", # required - # run_id: "RunId", + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", # } # - # @!attribute [rw] job_name - # The name of the job in question. - # @return [String] + # @!attribute [rw] bounded_size + # Sets the upper limit for the target size of the dataset in bytes + # that will be processed. + # @return [Integer] # - # @!attribute [rw] run_id - # The unique run identifier associated with this job run. + # @!attribute [rw] bounded_files + # Sets the upper limit for the target number of files that will be + # processed. + # @return [Integer] + # + # @!attribute [rw] enable_sample_path + # Sets option to enable a sample path. + # @return [Boolean] + # + # @!attribute [rw] sample_path + # If enabled, specifies the sample path. 
# @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmarkRequest AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3DirectSourceAdditionalOptions AWS API Documentation # - class ResetJobBookmarkRequest < Struct.new( - :job_name, - :run_id) + class S3DirectSourceAdditionalOptions < Struct.new( + :bounded_size, + :bounded_files, + :enable_sample_path, + :sample_path) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] job_bookmark_entry - # The reset bookmark entry. - # @return [Types::JobBookmarkEntry] + # Specifies a data target that writes to Amazon S3. # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResetJobBookmarkResponse AWS API Documentation + # @note When making an API call, you may pass S3DirectTarget + # data as a hash: # - class ResetJobBookmarkResponse < Struct.new( - :job_bookmark_entry) - SENSITIVE = [] - include Aws::Structure - end - - # A resource was not ready for a transaction. + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # } # - # @!attribute [rw] message - # A message describing the problem. + # @!attribute [rw] name + # The name of the data target. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceNotReadyException AWS API Documentation + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] # - class ResourceNotReadyException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # A resource numerical limit was exceeded. + # @!attribute [rw] partition_keys + # Specifies native partitioning using a sequence of keys. + # @return [Array>] # - # @!attribute [rw] message - # A message describing the problem. + # @!attribute [rw] path + # A single Amazon S3 path to write to. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceNumberLimitExceededException AWS API Documentation + # @!attribute [rw] compression + # Specifies how the data is compressed. This is generally not + # necessary if the data has a standard file extension. Possible values + # are `"gzip"` and `"bzip"`). + # @return [String] # - class ResourceNumberLimitExceededException < Struct.new( - :message) + # @!attribute [rw] format + # Specifies the data output format for the target. + # @return [String] + # + # @!attribute [rw] schema_change_policy + # A policy that specifies update behavior for the crawler. + # @return [Types::DirectSchemaChangePolicy] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3DirectTarget AWS API Documentation + # + class S3DirectTarget < Struct.new( + :name, + :inputs, + :partition_keys, + :path, + :compression, + :format, + :schema_change_policy) SENSITIVE = [] include Aws::Structure end - # The URIs for function resources. + # Specifies how Amazon Simple Storage Service (Amazon S3) data should be + # encrypted. 
# - # @note When making an API call, you may pass ResourceUri + # @note When making an API call, you may pass S3Encryption # data as a hash: # # { - # resource_type: "JAR", # accepts JAR, FILE, ARCHIVE - # uri: "URI", + # s3_encryption_mode: "DISABLED", # accepts DISABLED, SSE-KMS, SSE-S3 + # kms_key_arn: "KmsKeyArn", # } # - # @!attribute [rw] resource_type - # The type of the resource. + # @!attribute [rw] s3_encryption_mode + # The encryption mode to use for Amazon S3 data. # @return [String] # - # @!attribute [rw] uri - # The URI for accessing the resource. + # @!attribute [rw] kms_key_arn + # The Amazon Resource Name (ARN) of the KMS key to be used to encrypt + # the data. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResourceUri AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3Encryption AWS API Documentation # - class ResourceUri < Struct.new( - :resource_type, - :uri) + class S3Encryption < Struct.new( + :s3_encryption_mode, + :kms_key_arn) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass ResumeWorkflowRunRequest + # Specifies a data target that writes to Amazon S3 in Apache Parquet + # columnar storage. + # + # @note When making an API call, you may pass S3GlueParquetTarget # data as a hash: # # { - # name: "NameString", # required - # run_id: "IdString", # required - # node_ids: ["NameString"], # required + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, # } # # @!attribute [rw] name - # The name of the workflow to resume. + # The name of the data target. # @return [String] # - # @!attribute [rw] run_id - # The ID of the workflow run to resume. + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] + # + # @!attribute [rw] partition_keys + # Specifies native partitioning using a sequence of keys. + # @return [Array>] + # + # @!attribute [rw] path + # A single Amazon S3 path to write to. # @return [String] # - # @!attribute [rw] node_ids - # A list of the node IDs for the nodes you want to restart. The nodes - # that are to be restarted must have a run attempt in the original - # run. - # @return [Array] + # @!attribute [rw] compression + # Specifies how the data is compressed. This is generally not + # necessary if the data has a standard file extension. Possible values + # are `"gzip"` and `"bzip"`). + # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRunRequest AWS API Documentation + # @!attribute [rw] schema_change_policy + # A policy that specifies update behavior for the crawler. 
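+    #
+    # As an illustrative sketch only, a Parquet target node with a schema
+    # change policy might look like the following hash; the path, database,
+    # and table names are hypothetical.
+    #
+    #   s3_glue_parquet_target = {
+    #     name: "WriteOrdersParquet",
+    #     inputs: ["node-transform-1"],
+    #     path: "s3://example-bucket/curated/orders/",
+    #     compression: "snappy",
+    #     partition_keys: [["region"]],
+    #     schema_change_policy: {
+    #       enable_update_catalog: true,
+    #       update_behavior: "UPDATE_IN_DATABASE",
+    #       database: "example_db",
+    #       table: "orders",
+    #     },
+    #   }
+    #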
+ # @return [Types::DirectSchemaChangePolicy] # - class ResumeWorkflowRunRequest < Struct.new( + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3GlueParquetTarget AWS API Documentation + # + class S3GlueParquetTarget < Struct.new( :name, - :run_id, - :node_ids) + :inputs, + :partition_keys, + :path, + :compression, + :schema_change_policy) SENSITIVE = [] include Aws::Structure end - # @!attribute [rw] run_id - # The new ID assigned to the resumed workflow run. Each resume of a - # workflow run will have a new run ID. + # Specifies a JSON data store stored in Amazon S3. + # + # @note When making an API call, you may pass S3JsonSource + # data as a hash: + # + # { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the data store. # @return [String] # - # @!attribute [rw] node_ids - # A list of the node IDs for the nodes that were actually restarted. + # @!attribute [rw] paths + # A list of the Amazon S3 paths to read from. # @return [Array] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRunResponse AWS API Documentation + # @!attribute [rw] compression_type + # Specifies how the data is compressed. This is generally not + # necessary if the data has a standard file extension. Possible values + # are `"gzip"` and `"bzip"`). + # @return [String] # - class ResumeWorkflowRunResponse < Struct.new( - :run_id, - :node_ids) + # @!attribute [rw] exclusions + # A string containing a JSON list of Unix-style glob patterns to + # exclude. For example, "\[\\"**.pdf\\"\]" excludes all PDF + # files. + # @return [Array] + # + # @!attribute [rw] group_size + # The target group size in bytes. The default is computed based on the + # input data size and the size of your cluster. When there are fewer + # than 50,000 input files, `"groupFiles"` must be set to + # `"inPartition"` for this to take effect. + # @return [String] + # + # @!attribute [rw] group_files + # Grouping files is turned on by default when the input contains more + # than 50,000 files. To turn on grouping with fewer than 50,000 files, + # set this parameter to "inPartition". To disable grouping when + # there are more than 50,000 files, set this parameter to `"none"`. + # @return [String] + # + # @!attribute [rw] recurse + # If set to true, recursively reads files in all subdirectories under + # the specified paths. + # @return [Boolean] + # + # @!attribute [rw] max_band + # This option controls the duration in milliseconds after which the s3 + # listing is likely to be consistent. Files with modification + # timestamps falling within the last maxBand milliseconds are tracked + # specially when using JobBookmarks to account for Amazon S3 eventual + # consistency. Most users don't need to set this option. The default + # is 900000 milliseconds, or 15 minutes. 
+ # @return [Integer] + # + # @!attribute [rw] max_files_in_band + # This option specifies the maximum number of files to save from the + # last maxBand seconds. If this number is exceeded, extra files are + # skipped and only processed in the next job run. + # @return [Integer] + # + # @!attribute [rw] additional_options + # Specifies additional connection options. + # @return [Types::S3DirectSourceAdditionalOptions] + # + # @!attribute [rw] json_path + # A JsonPath string defining the JSON data. + # @return [String] + # + # @!attribute [rw] multiline + # A Boolean value that specifies whether a single record can span + # multiple lines. This can occur when a field contains a quoted + # new-line character. You must set this option to True if any record + # spans multiple lines. The default value is `False`, which allows for + # more aggressive file-splitting during parsing. + # @return [Boolean] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the S3 JSON source. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3JsonSource AWS API Documentation + # + class S3JsonSource < Struct.new( + :name, + :paths, + :compression_type, + :exclusions, + :group_size, + :group_files, + :recurse, + :max_band, + :max_files_in_band, + :additional_options, + :json_path, + :multiline, + :output_schemas) SENSITIVE = [] include Aws::Structure end - # @note When making an API call, you may pass RunStatementRequest + # Specifies an Apache Parquet data store stored in Amazon S3. + # + # @note When making an API call, you may pass S3ParquetSource # data as a hash: # # { - # session_id: "NameString", # required - # code: "OrchestrationStatementCodeString", # required - # request_origin: "OrchestrationNameString", + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], # } # - # @!attribute [rw] session_id - # The Session Id of the statement to be run. + # @!attribute [rw] name + # The name of the data store. # @return [String] # - # @!attribute [rw] code - # The statement code to be run. + # @!attribute [rw] paths + # A list of the Amazon S3 paths to read from. + # @return [Array] + # + # @!attribute [rw] compression_type + # Specifies how the data is compressed. This is generally not + # necessary if the data has a standard file extension. Possible values + # are `"gzip"` and `"bzip"`). # @return [String] # - # @!attribute [rw] request_origin - # The origin of the request. + # @!attribute [rw] exclusions + # A string containing a JSON list of Unix-style glob patterns to + # exclude. For example, "\[\\"**.pdf\\"\]" excludes all PDF + # files. + # @return [Array] + # + # @!attribute [rw] group_size + # The target group size in bytes. The default is computed based on the + # input data size and the size of your cluster. When there are fewer + # than 50,000 input files, `"groupFiles"` must be set to + # `"inPartition"` for this to take effect. 
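+    #
+    # As an illustrative sketch only, the grouping options described above can
+    # be combined on a JSON source node as follows; the paths, JsonPath
+    # expression, and group size are hypothetical values, not defaults.
+    #
+    #   s3_json_source = {
+    #     name: "ReadEventsJson",
+    #     paths: ["s3://example-bucket/events/"],
+    #     json_path: "$.records[*]",
+    #     group_files: "inPartition",   # force grouping even for small inputs
+    #     group_size: "1048576",        # target roughly 1 MB per group
+    #     multiline: false,
+    #   }
+    #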
# @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RunStatementRequest AWS API Documentation + # @!attribute [rw] group_files + # Grouping files is turned on by default when the input contains more + # than 50,000 files. To turn on grouping with fewer than 50,000 files, + # set this parameter to "inPartition". To disable grouping when + # there are more than 50,000 files, set this parameter to `"none"`. + # @return [String] # - class RunStatementRequest < Struct.new( - :session_id, - :code, - :request_origin) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] id - # Returns the Id of the statement that was run. + # @!attribute [rw] recurse + # If set to true, recursively reads files in all subdirectories under + # the specified paths. + # @return [Boolean] + # + # @!attribute [rw] max_band + # This option controls the duration in milliseconds after which the s3 + # listing is likely to be consistent. Files with modification + # timestamps falling within the last maxBand milliseconds are tracked + # specially when using JobBookmarks to account for Amazon S3 eventual + # consistency. Most users don't need to set this option. The default + # is 900000 milliseconds, or 15 minutes. # @return [Integer] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/RunStatementResponse AWS API Documentation + # @!attribute [rw] max_files_in_band + # This option specifies the maximum number of files to save from the + # last maxBand seconds. If this number is exceeded, extra files are + # skipped and only processed in the next job run. + # @return [Integer] # - class RunStatementResponse < Struct.new( - :id) + # @!attribute [rw] additional_options + # Specifies additional connection options. + # @return [Types::S3DirectSourceAdditionalOptions] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the S3 Parquet source. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3ParquetSource AWS API Documentation + # + class S3ParquetSource < Struct.new( + :name, + :paths, + :compression_type, + :exclusions, + :group_size, + :group_files, + :recurse, + :max_band, + :max_files_in_band, + :additional_options, + :output_schemas) SENSITIVE = [] include Aws::Structure end - # Specifies how Amazon Simple Storage Service (Amazon S3) data should be - # encrypted. + # Specifies additional connection options for the Amazon S3 data store. # - # @note When making an API call, you may pass S3Encryption + # @note When making an API call, you may pass S3SourceAdditionalOptions # data as a hash: # # { - # s3_encryption_mode: "DISABLED", # accepts DISABLED, SSE-KMS, SSE-S3 - # kms_key_arn: "KmsKeyArn", + # bounded_size: 1, + # bounded_files: 1, # } # - # @!attribute [rw] s3_encryption_mode - # The encryption mode to use for Amazon S3 data. - # @return [String] + # @!attribute [rw] bounded_size + # Sets the upper limit for the target size of the dataset in bytes + # that will be processed. + # @return [Integer] # - # @!attribute [rw] kms_key_arn - # The Amazon Resource Name (ARN) of the KMS key to be used to encrypt - # the data. - # @return [String] + # @!attribute [rw] bounded_files + # Sets the upper limit for the target number of files that will be + # processed. 
+ # @return [Integer] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3Encryption AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/S3SourceAdditionalOptions AWS API Documentation # - class S3Encryption < Struct.new( - :s3_encryption_mode, - :kms_key_arn) + class S3SourceAdditionalOptions < Struct.new( + :bounded_size, + :bounded_files) SENSITIVE = [] include Aws::Structure end @@ -15367,6 +20949,77 @@ class Segment < Struct.new( include Aws::Structure end + # Specifies a transform that chooses the data property keys that you + # want to keep. + # + # @note When making an API call, you may pass SelectFields + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] paths + # A JSON path to a variable in the data structure. + # @return [Array>] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SelectFields AWS API Documentation + # + class SelectFields < Struct.new( + :name, + :inputs, + :paths) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform that chooses one `DynamicFrame` from a + # collection of `DynamicFrames`. The output is the selected + # `DynamicFrame` + # + # @note When making an API call, you may pass SelectFromCollection + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] index + # The index for the DynamicFrame to be selected. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SelectFromCollection AWS API Documentation + # + class SelectFromCollection < Struct.new( + :name, + :inputs, + :index) + SENSITIVE = [] + include Aws::Structure + end + # Information about a serialization/deserialization program (SerDe) that # serves as an extractor and loader. # @@ -15485,95 +21138,413 @@ class Session < Struct.new( include Aws::Structure end - # The `SessionCommand` that runs the job. + # The `SessionCommand` that runs the job. + # + # @note When making an API call, you may pass SessionCommand + # data as a hash: + # + # { + # name: "NameString", + # python_version: "PythonVersionString", + # } + # + # @!attribute [rw] name + # Specifies the name of the SessionCommand.Can be 'glueetl' or + # 'gluestreaming'. + # @return [String] + # + # @!attribute [rw] python_version + # Specifies the Python version. The Python version indicates the + # version supported for jobs of type Spark. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SessionCommand AWS API Documentation + # + class SessionCommand < Struct.new( + :name, + :python_version) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies skewed values in a table. Skewed values are those that occur + # with very high frequency. 
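+    #
+    # As an illustrative sketch only, the two selection transforms documented
+    # in this patch might be expressed as the following hashes; node IDs and
+    # field names are hypothetical.
+    #
+    #   # Keep only two top-level fields from the incoming frame.
+    #   select_fields = {
+    #     name: "KeepIdAndTotal",
+    #     inputs: ["node-source-1"],
+    #     paths: [["order_id"], ["total"]],
+    #   }
+    #
+    #   # Pick the first DynamicFrame out of a collection produced upstream.
+    #   select_from_collection = {
+    #     name: "PickFirstFrame",
+    #     inputs: ["node-split-1"],
+    #     index: 0,
+    #   }
+    #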
+ # + # @note When making an API call, you may pass SkewedInfo + # data as a hash: + # + # { + # skewed_column_names: ["NameString"], + # skewed_column_values: ["ColumnValuesString"], + # skewed_column_value_location_maps: { + # "ColumnValuesString" => "ColumnValuesString", + # }, + # } + # + # @!attribute [rw] skewed_column_names + # A list of names of columns that contain skewed values. + # @return [Array] + # + # @!attribute [rw] skewed_column_values + # A list of values that appear so frequently as to be considered + # skewed. + # @return [Array] + # + # @!attribute [rw] skewed_column_value_location_maps + # A mapping of skewed values to the columns that contain them. + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SkewedInfo AWS API Documentation + # + class SkewedInfo < Struct.new( + :skewed_column_names, + :skewed_column_values, + :skewed_column_value_location_maps) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a field to sort by and a sort order. + # + # @note When making an API call, you may pass SortCriterion + # data as a hash: + # + # { + # field_name: "ValueString", + # sort: "ASC", # accepts ASC, DESC + # } + # + # @!attribute [rw] field_name + # The name of the field on which to sort. + # @return [String] + # + # @!attribute [rw] sort + # An ascending or descending sort. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SortCriterion AWS API Documentation + # + class SortCriterion < Struct.new( + :field_name, + :sort) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a connector to an Apache Spark data source. + # + # @note When making an API call, you may pass SparkConnectorSource + # data as a hash: + # + # { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the data source. + # @return [String] + # + # @!attribute [rw] connection_name + # The name of the connection that is associated with the connector. + # @return [String] + # + # @!attribute [rw] connector_name + # The name of a connector that assists with accessing the data store + # in Glue Studio. + # @return [String] + # + # @!attribute [rw] connection_type + # The type of connection, such as marketplace.spark or custom.spark, + # designating a connection to an Apache Spark data store. + # @return [String] + # + # @!attribute [rw] additional_options + # Additional connection options for the connector. + # @return [Hash] + # + # @!attribute [rw] output_schemas + # Specifies data schema for the custom spark source. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SparkConnectorSource AWS API Documentation + # + class SparkConnectorSource < Struct.new( + :name, + :connection_name, + :connector_name, + :connection_type, + :additional_options, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a target that uses an Apache Spark connector. 
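+    #
+    # As an illustrative sketch only, a connector-backed Spark source node
+    # might be declared as below; the connection name, connector name, and
+    # the option key passed to the connector are hypothetical placeholders.
+    #
+    #   spark_connector_source = {
+    #     name: "ReadFromConnector",
+    #     connection_name: "example-connection",
+    #     connector_name: "example-spark-connector",
+    #     connection_type: "marketplace.spark",
+    #     additional_options: { "table" => "public.orders" },
+    #   }
+    #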
+ # + # @note When making an API call, you may pass SparkConnectorTarget + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the data target. + # @return [String] + # + # @!attribute [rw] inputs + # The nodes that are inputs to the data target. + # @return [Array] + # + # @!attribute [rw] connection_name + # The name of a connection for an Apache Spark connector. + # @return [String] + # + # @!attribute [rw] connector_name + # The name of an Apache Spark connector. + # @return [String] + # + # @!attribute [rw] connection_type + # The type of connection, such as marketplace.spark or custom.spark, + # designating a connection to an Apache Spark data store. + # @return [String] + # + # @!attribute [rw] additional_options + # Additional connection options for the connector. + # @return [Hash] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the custom spark target. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SparkConnectorTarget AWS API Documentation + # + class SparkConnectorTarget < Struct.new( + :name, + :inputs, + :connection_name, + :connector_name, + :connection_type, + :additional_options, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform where you enter a SQL query using Spark SQL + # syntax to transform the data. The output is a single `DynamicFrame`. + # + # @note When making an API call, you may pass SparkSQL + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The data inputs identified by their node names. You can associate a + # table name with each input node to use in the SQL query. The name + # you choose must meet the Spark SQL naming restrictions. + # @return [Array] + # + # @!attribute [rw] sql_query + # A SQL query that must use Spark SQL syntax and return a single data + # set. + # @return [String] + # + # @!attribute [rw] sql_aliases + # A list of aliases. An alias allows you to specify what name to use + # in the SQL for a given input. For example, you have a datasource + # named "MyDataSource". If you specify `From` as MyDataSource, and + # `Alias` as SqlName, then in your SQL you can do: + # + # `select * from SqlName` + # + # and that gets data from MyDataSource. + # @return [Array] + # + # @!attribute [rw] output_schemas + # Specifies the data schema for the SparkSQL transform. 
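+    #
+    # As an illustrative sketch only, a SparkSQL node that aliases its input
+    # so the query can refer to it by name might be written as follows; the
+    # node ID and alias are hypothetical.
+    #
+    #   spark_sql = {
+    #     name: "FilterLargeOrders",
+    #     inputs: ["node-source-1"],
+    #     sql_aliases: [{ from: "node-source-1", alias: "orders" }],
+    #     sql_query: "select * from orders where total > 100",
+    #   }
+    #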
+ # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SparkSQL AWS API Documentation + # + class SparkSQL < Struct.new( + :name, + :inputs, + :sql_query, + :sql_aliases, + :output_schemas) + SENSITIVE = [] + include Aws::Structure + end + + # Specifies a transform that writes samples of the data to an Amazon S3 + # bucket. # - # @note When making an API call, you may pass SessionCommand + # @note When making an API call, you may pass Spigot # data as a hash: # # { - # name: "NameString", - # python_version: "PythonVersionString", + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, # } # # @!attribute [rw] name - # Specifies the name of the SessionCommand.Can be 'glueetl' or - # 'gluestreaming'. + # The name of the transform node. # @return [String] # - # @!attribute [rw] python_version - # Specifies the Python version. The Python version indicates the - # version supported for jobs of type Spark. + # @!attribute [rw] inputs + # The data inputs identified by their node names. + # @return [Array] + # + # @!attribute [rw] path + # A path in Amazon S3 where the transform will write a subset of + # records from the dataset to a JSON file in an Amazon S3 bucket. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SessionCommand AWS API Documentation + # @!attribute [rw] topk + # Specifies a number of records to write starting from the beginning + # of the dataset. + # @return [Integer] # - class SessionCommand < Struct.new( + # @!attribute [rw] prob + # The probability (a decimal value with a maximum value of 1) of + # picking any given record. A value of 1 indicates that each row read + # from the dataset should be included in the sample output. + # @return [Float] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Spigot AWS API Documentation + # + class Spigot < Struct.new( :name, - :python_version) + :inputs, + :path, + :topk, + :prob) SENSITIVE = [] include Aws::Structure end - # Specifies skewed values in a table. Skewed values are those that occur - # with very high frequency. + # Specifies a transform that splits data property keys into two + # `DynamicFrames`. The output is a collection of `DynamicFrames`\: one + # with selected data property keys, and one with the remaining data + # property keys. # - # @note When making an API call, you may pass SkewedInfo + # @note When making an API call, you may pass SplitFields # data as a hash: # # { - # skewed_column_names: ["NameString"], - # skewed_column_values: ["ColumnValuesString"], - # skewed_column_value_location_maps: { - # "ColumnValuesString" => "ColumnValuesString", - # }, + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], # } # - # @!attribute [rw] skewed_column_names - # A list of names of columns that contain skewed values. - # @return [Array] + # @!attribute [rw] name + # The name of the transform node. + # @return [String] # - # @!attribute [rw] skewed_column_values - # A list of values that appear so frequently as to be considered - # skewed. + # @!attribute [rw] inputs + # The data inputs identified by their node names. # @return [Array] # - # @!attribute [rw] skewed_column_value_location_maps - # A mapping of skewed values to the columns that contain them. - # @return [Hash] + # @!attribute [rw] paths + # A JSON path to a variable in the data structure. 
+ # @return [Array>] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SkewedInfo AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SplitFields AWS API Documentation # - class SkewedInfo < Struct.new( - :skewed_column_names, - :skewed_column_values, - :skewed_column_value_location_maps) + class SplitFields < Struct.new( + :name, + :inputs, + :paths) SENSITIVE = [] include Aws::Structure end - # Specifies a field to sort by and a sort order. + # Represents a single entry in the list of values for `SqlAliases`. # - # @note When making an API call, you may pass SortCriterion + # @note When making an API call, you may pass SqlAlias # data as a hash: # # { - # field_name: "ValueString", - # sort: "ASC", # accepts ASC, DESC + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required # } # - # @!attribute [rw] field_name - # The name of the field on which to sort. + # @!attribute [rw] from + # A table, or a column in a table. # @return [String] # - # @!attribute [rw] sort - # An ascending or descending sort. + # @!attribute [rw] alias + # A temporary name given to a table, or a column in a table. # @return [String] # - # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SortCriterion AWS API Documentation + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/SqlAlias AWS API Documentation # - class SortCriterion < Struct.new( - :field_name, - :sort) + class SqlAlias < Struct.new( + :from, + :alias) SENSITIVE = [] include Aws::Structure end @@ -16451,6 +22422,34 @@ class StorageDescriptor < Struct.new( include Aws::Structure end + # Specifies options related to data preview for viewing a sample of your + # data. + # + # @note When making an API call, you may pass StreamingDataPreviewOptions + # data as a hash: + # + # { + # polling_time: 1, + # record_polling_limit: 1, + # } + # + # @!attribute [rw] polling_time + # The polling time in milliseconds. + # @return [Integer] + # + # @!attribute [rw] record_polling_limit + # The limit to the number of records polled. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StreamingDataPreviewOptions AWS API Documentation + # + class StreamingDataPreviewOptions < Struct.new( + :polling_time, + :record_polling_limit) + SENSITIVE = [] + include Aws::Structure + end + # Defines column statistics supported for character sequence data # values. # @@ -17471,6 +23470,46 @@ class UnfilteredPartition < Struct.new( include Aws::Structure end + # Specifies a transform that combines the rows from two or more datasets + # into a single result. + # + # @note When making an API call, you may pass Union + # data as a hash: + # + # { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # } + # + # @!attribute [rw] name + # The name of the transform node. + # @return [String] + # + # @!attribute [rw] inputs + # The node ID inputs to the transform. + # @return [Array] + # + # @!attribute [rw] union_type + # Indicates the type of Union transform. + # + # Specify `ALL` to join all rows from data sources to the resulting + # DynamicFrame. The resulting union does not remove duplicate rows. + # + # Specify `DISTINCT` to remove duplicate rows in the resulting + # DynamicFrame. 
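+    #
+    # As an illustrative sketch only, a Union transform that keeps duplicate
+    # rows from two upstream nodes might be declared as below; the node IDs
+    # are hypothetical.
+    #
+    #   union = {
+    #     name: "CombineRegions",
+    #     inputs: ["node-us-source", "node-eu-source"],
+    #     union_type: "ALL",    # use "DISTINCT" to drop duplicate rows
+    #   }
+    #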
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/Union AWS API Documentation + # + class Union < Struct.new( + :name, + :inputs, + :union_type) + SENSITIVE = [] + include Aws::Structure + end + # @note When making an API call, you may pass UntagResourceRequest # data as a hash: # @@ -18365,6 +24404,674 @@ class UpdateGrokClassifierRequest < Struct.new( # notify_delay_after: 1, # }, # glue_version: "GlueVersionString", + # code_gen_configuration_nodes: { + # "NodeId" => { + # athena_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", + # schema_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # jdbc_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # filter_predicate: "EnclosedInStringProperty", + # partition_column: "EnclosedInStringProperty", + # lower_bound: 1, + # upper_bound: 1, + # num_partitions: 1, + # job_bookmark_keys: ["EnclosedInStringProperty"], + # job_bookmark_keys_sort_order: "EnclosedInStringProperty", + # data_type_mapping: { + # "ARRAY" => "DATE", # accepts DATE, STRING, TIMESTAMP, INT, FLOAT, LONG, BIGDECIMAL, BYTE, SHORT, DOUBLE + # }, + # }, + # connection_table: "EnclosedInStringPropertyWithQuote", + # query: "SqlQuery", + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_source: { + # name: "NodeName", # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # }, + # s3_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # s3_csv_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # 
max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # separator: "comma", # required, accepts comma, ctrla, pipe, semicolon, tab + # escaper: "EnclosedInStringPropertyWithQuote", + # quote_char: "quote", # required, accepts quote, quillemet, single_quote, disabled + # multiline: false, + # with_header: false, + # write_header: false, + # skip_first: false, + # optimize_performance: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_json_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "gzip", # accepts gzip, bzip2 + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # json_path: "EnclosedInStringProperty", + # multiline: false, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # s3_parquet_source: { + # name: "NodeName", # required + # paths: ["EnclosedInStringProperty"], # required + # compression_type: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # exclusions: ["EnclosedInStringProperty"], + # group_size: "EnclosedInStringProperty", + # group_files: "EnclosedInStringProperty", + # recurse: false, + # max_band: 1, + # max_files_in_band: 1, + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # enable_sample_path: false, + # sample_path: "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # relational_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # dynamo_db_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # jdbc_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connection_table: "EnclosedInStringPropertyWithQuote", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_connector_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # connection_name: "EnclosedInStringProperty", # required + # connector_name: "EnclosedInStringProperty", # required + # connection_type: "EnclosedInStringProperty", # required + # additional_options: { + # "EnclosedInStringProperty" => "EnclosedInStringProperty", + # }, + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required 
+ # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # redshift_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # redshift_tmp_dir: "EnclosedInStringProperty", + # tmp_dir_iam_role: "EnclosedInStringProperty", + # upsert_redshift_options: { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # }, + # }, + # s3_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # s3_glue_parquet_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "snappy", # accepts snappy, lzo, gzip, uncompressed, none + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # s3_direct_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # path: "EnclosedInStringProperty", # required + # compression: "EnclosedInStringProperty", + # format: "json", # required, accepts json, csv, avro, orc, parquet + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # table: "EnclosedInStringProperty", + # database: "EnclosedInStringProperty", + # }, + # }, + # apply_mapping: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # mapping: [ # required + # { + # to_key: "EnclosedInStringProperty", + # from_path: ["EnclosedInStringProperty"], + # from_type: "EnclosedInStringProperty", + # to_type: "EnclosedInStringProperty", + # dropped: false, + # children: { + # # recursive Mappings + # }, + # }, + # ], + # }, + # select_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # drop_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # rename_field: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source_path: ["EnclosedInStringProperty"], # required + # target_path: ["EnclosedInStringProperty"], # required + # }, + # spigot: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # path: "EnclosedInStringProperty", # required + # topk: 1, + # prob: 1.0, + # }, + # join: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # join_type: "equijoin", # required, accepts equijoin, left, right, outer, leftsemi, leftanti + # columns: [ # required + # { + # from: "EnclosedInStringProperty", 
# required + # keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # ], + # }, + # split_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # paths: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # select_from_collection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # index: 1, # required + # }, + # fill_missing_values: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # imputed_path: "EnclosedInStringProperty", # required + # filled_path: "EnclosedInStringProperty", + # }, + # filter: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # logical_operator: "AND", # required, accepts AND, OR + # filters: [ # required + # { + # operation: "EQ", # required, accepts EQ, LT, GT, LTE, GTE, REGEX, ISNULL + # negated: false, + # values: [ # required + # { + # type: "COLUMNEXTRACTED", # required, accepts COLUMNEXTRACTED, CONSTANT + # value: ["EnclosedInStringProperty"], # required + # }, + # ], + # }, + # ], + # }, + # custom_code: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # code: "ExtendedString", # required + # class_name: "EnclosedInStringProperty", # required + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # spark_sql: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # sql_query: "SqlQuery", # required + # sql_aliases: [ # required + # { + # from: "NodeId", # required + # alias: "EnclosedInStringPropertyWithQuote", # required + # }, + # ], + # output_schemas: [ + # { + # columns: [ + # { + # name: "GlueStudioColumnNameString", # required + # type: "ColumnTypeString", + # }, + # ], + # }, + # ], + # }, + # direct_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # direct_kafka_source: { + # name: "NodeName", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # window_size: 1, + # detect_schema: false, + # data_preview_options: { + # polling_time: 1, + # 
record_polling_limit: 1, + # }, + # }, + # catalog_kinesis_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # endpoint_url: "EnclosedInStringProperty", + # stream_name: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_position: "latest", # accepts latest, trim_horizon, earliest + # max_fetch_time_in_ms: 1, + # max_fetch_records_per_shard: 1, + # max_record_per_read: 1, + # add_idle_time_between_reads: false, + # idle_time_between_reads_in_ms: 1, + # describe_shard_interval: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_retry_interval_ms: 1, + # avoid_empty_batches: false, + # stream_arn: "EnclosedInStringProperty", + # role_arn: "EnclosedInStringProperty", + # role_session_name: "EnclosedInStringProperty", + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # catalog_kafka_source: { + # name: "NodeName", # required + # window_size: 1, + # detect_schema: false, + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # streaming_options: { + # bootstrap_servers: "EnclosedInStringProperty", + # security_protocol: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # topic_name: "EnclosedInStringProperty", + # assign: "EnclosedInStringProperty", + # subscribe_pattern: "EnclosedInStringProperty", + # classification: "EnclosedInStringProperty", + # delimiter: "EnclosedInStringProperty", + # starting_offsets: "EnclosedInStringProperty", + # ending_offsets: "EnclosedInStringProperty", + # poll_timeout_ms: 1, + # num_retries: 1, + # retry_interval_ms: 1, + # max_offsets_per_trigger: 1, + # min_partitions: 1, + # }, + # data_preview_options: { + # polling_time: 1, + # record_polling_limit: 1, + # }, + # }, + # drop_null_fields: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # null_check_box_list: { + # is_empty: false, + # is_null_string: false, + # is_neg_one: false, + # }, + # null_text_list: [ + # { + # value: "EnclosedInStringProperty", # required + # datatype: { # required + # id: "GenericLimitedString", # required + # label: "GenericLimitedString", # required + # }, + # }, + # ], + # }, + # merge: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # source: "NodeId", # required + # primary_keys: [ # required + # ["EnclosedInStringProperty"], + # ], + # }, + # union: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # union_type: "ALL", # required, accepts ALL, DISTINCT + # }, + # pii_detection: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # pii_type: "RowAudit", # required, accepts RowAudit, RowMasking, ColumnAudit, ColumnMasking + # entity_types_to_detect: ["EnclosedInStringProperty"], # required + # output_column_name: "EnclosedInStringProperty", + # sample_fraction: 1.0, + # threshold_fraction: 1.0, + # mask_value: "MaskValue", + # }, + # aggregate: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # groups: [ # required + # ["EnclosedInStringProperty"], + # ], + # aggs: [ # required + # { + # column: ["EnclosedInStringProperty"], # required + # agg_func: "avg", # required, accepts avg, countDistinct, count, first, last, kurtosis, max, min, skewness, stddev_samp, stddev_pop, sum, sumDistinct, 
var_samp, var_pop + # }, + # ], + # }, + # drop_duplicates: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # columns: [ + # ["GenericLimitedString"], + # ], + # }, + # governed_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # partition_keys: [ + # ["EnclosedInStringProperty"], + # ], + # table: "EnclosedInStringProperty", # required + # database: "EnclosedInStringProperty", # required + # schema_change_policy: { + # enable_update_catalog: false, + # update_behavior: "UPDATE_IN_DATABASE", # accepts UPDATE_IN_DATABASE, LOG + # }, + # }, + # governed_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # partition_predicate: "EnclosedInStringProperty", + # additional_options: { + # bounded_size: 1, + # bounded_files: 1, + # }, + # }, + # microsoft_sql_server_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_source: { + # name: "NodeName", # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # microsoft_sql_server_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # my_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # oracle_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # postgre_sql_catalog_target: { + # name: "NodeName", # required + # inputs: ["NodeId"], # required + # database: "EnclosedInStringProperty", # required + # table: "EnclosedInStringProperty", # required + # }, + # }, + # }, # }, # } # @@ -19159,6 +25866,40 @@ class UpdateXMLClassifierRequest < Struct.new( include Aws::Structure end + # The options to configure an upsert operation when writing to a + # Redshift target . + # + # @note When making an API call, you may pass UpsertRedshiftTargetOptions + # data as a hash: + # + # { + # table_location: "EnclosedInStringProperty", + # connection_name: "EnclosedInStringProperty", + # upsert_keys: ["EnclosedInStringProperty"], + # } + # + # @!attribute [rw] table_location + # The physical location of the Redshift table. + # @return [String] + # + # @!attribute [rw] connection_name + # The name of the connection to use to write to Redshift. + # @return [String] + # + # @!attribute [rw] upsert_keys + # The keys used to determine whether to perform an update or insert. 
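+    #
+    # As an illustrative end-to-end sketch only, the node hashes above are
+    # keyed by node ID and wired together through their `inputs`; the job
+    # name, role, script location, and node IDs below are hypothetical, and
+    # the example assumes `create_job` accepts the same
+    # `code_gen_configuration_nodes` parameter shown here for job updates.
+    #
+    #   require "aws-sdk-glue"
+    #
+    #   glue = Aws::Glue::Client.new(region: "us-east-1")
+    #   glue.create_job(
+    #     name: "example-visual-job",
+    #     role: "arn:aws:iam::123456789012:role/ExampleGlueRole",
+    #     command: { name: "glueetl", script_location: "s3://example-bucket/scripts/job.py" },
+    #     glue_version: "3.0",
+    #     code_gen_configuration_nodes: {
+    #       "node-1" => {
+    #         s3_catalog_source: { name: "Source", database: "example_db", table: "orders" },
+    #       },
+    #       "node-2" => {
+    #         apply_mapping: {
+    #           name: "MapFields",
+    #           inputs: ["node-1"],
+    #           mapping: [{ to_key: "order_id", from_path: ["id"], from_type: "string", to_type: "string" }],
+    #         },
+    #       },
+    #       "node-3" => {
+    #         s3_direct_target: {
+    #           name: "Target",
+    #           inputs: ["node-2"],
+    #           path: "s3://example-bucket/output/",
+    #           format: "json",
+    #         },
+    #       },
+    #     },
+    #   )
+    #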
+ # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpsertRedshiftTargetOptions AWS API Documentation + # + class UpsertRedshiftTargetOptions < Struct.new( + :table_location, + :connection_name, + :upsert_keys) + SENSITIVE = [] + include Aws::Structure + end + # Represents the equivalent of a Hive user-defined function (`UDF`) # definition. # diff --git a/gems/aws-sdk-kms/CHANGELOG.md b/gems/aws-sdk-kms/CHANGELOG.md index a71e40ac836..b69f59b5899 100644 --- a/gems/aws-sdk-kms/CHANGELOG.md +++ b/gems/aws-sdk-kms/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.57.0 (2022-05-17) +------------------ + +* Feature - Add HMAC best practice tip, annual rotation of AWS managed keys. + 1.56.0 (2022-04-19) ------------------ diff --git a/gems/aws-sdk-kms/VERSION b/gems/aws-sdk-kms/VERSION index 3ebf789f5a8..373aea97570 100644 --- a/gems/aws-sdk-kms/VERSION +++ b/gems/aws-sdk-kms/VERSION @@ -1 +1 @@ -1.56.0 +1.57.0 diff --git a/gems/aws-sdk-kms/lib/aws-sdk-kms.rb b/gems/aws-sdk-kms/lib/aws-sdk-kms.rb index 0b19d3c4040..0109fb440a8 100644 --- a/gems/aws-sdk-kms/lib/aws-sdk-kms.rb +++ b/gems/aws-sdk-kms/lib/aws-sdk-kms.rb @@ -48,6 +48,6 @@ # @!group service module Aws::KMS - GEM_VERSION = '1.56.0' + GEM_VERSION = '1.57.0' end diff --git a/gems/aws-sdk-kms/lib/aws-sdk-kms/client.rb b/gems/aws-sdk-kms/lib/aws-sdk-kms/client.rb index 4384ce85188..89c31051d89 100644 --- a/gems/aws-sdk-kms/lib/aws-sdk-kms/client.rb +++ b/gems/aws-sdk-kms/lib/aws-sdk-kms/client.rb @@ -1102,11 +1102,11 @@ def create_grant(params = {}, options = {}) # # Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve # (ECC) key pair. The private key in an asymmetric KMS key never - # leaves AWS KMS unencrypted. However, you can use the GetPublicKey + # leaves KMS unencrypted. However, you can use the GetPublicKey # operation to download the public key so it can be used outside of - # AWS KMS. KMS keys with RSA key pairs can be used to encrypt or - # decrypt data or sign and verify messages (but not both). KMS keys - # with ECC key pairs can be used only to sign and verify messages. For + # KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt + # data or sign and verify messages (but not both). KMS keys with ECC + # key pairs can be used only to sign and verify messages. For # information about asymmetric KMS keys, see [Asymmetric KMS keys][2] # in the *Key Management Service Developer Guide*. # @@ -1230,16 +1230,19 @@ def create_grant(params = {}, options = {}) # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key # # @option params [String] :policy - # The key policy to attach to the KMS key. + # The key policy to attach to the KMS key. If you do not specify a key + # policy, KMS attaches a default key policy to the KMS key. For more + # information, see [Default key policy][1] in the *Key Management + # Service Developer Guide*. # # If you provide a key policy, it must meet the following criteria: # - # * If you don't set `BypassPolicyLockoutSafetyCheck` to true, the key - # policy must allow the principal that is making the `CreateKey` + # * If you don't set `BypassPolicyLockoutSafetyCheck` to `True`, the + # key policy must allow the principal that is making the `CreateKey` # request to make a subsequent PutKeyPolicy request on the KMS key. # This reduces the risk that the KMS key becomes unmanageable. 
For # more information, refer to the scenario in the [Default Key - # Policy][1] section of the Key Management Service Developer + # Policy][2] section of the Key Management Service Developer # Guide . # # * Each statement in the key policy must contain one or more @@ -1249,14 +1252,23 @@ def create_grant(params = {}, options = {}) # delay before including the new principal in a key policy because the # new principal might not be immediately visible to KMS. For more # information, see [Changes that I make are not always immediately - # visible][2] in the *Amazon Web Services Identity and Access + # visible][3] in the *Amazon Web Services Identity and Access # Management User Guide*. # - # If you do not provide a key policy, KMS attaches a default key policy - # to the KMS key. For more information, see [Default Key Policy][3] in - # the *Key Management Service Developer Guide*. + # A key policy document must conform to the following rules. # - # The key policy size quota is 32 kilobytes (32768 bytes). + # * Up to 32 kilobytes (32768 bytes) + # + # * Must be UTF-8 encoded + # + # * The only Unicode characters that are permitted in a key policy + # document are the horizontal tab (U+0009), linefeed (U+000A), + # carriage return (U+000D), and characters in the range U+0020 to + # U+00FF. + # + # * The `Sid` element in a key policy statement can include spaces. + # (Spaces are prohibited in the `Sid` element of an IAM policy + # document.) # # For help writing and formatting a JSON policy document, see the [IAM # JSON Policy Reference][4] in the Identity and Access Management @@ -1264,9 +1276,9 @@ def create_grant(params = {}, options = {}) # # # - # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam - # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency - # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html # # @option params [String] :description @@ -1319,13 +1331,13 @@ def create_grant(params = {}, options = {}) # Service Developer Guide . # # The `KeySpec` determines whether the KMS key contains a symmetric key - # or an asymmetric key pair. It also determines the algorithms that the - # KMS key supports. You can't change the `KeySpec` after the KMS key is - # created. To further restrict the algorithms that can be used with the - # KMS key, use a condition key in its key policy or IAM policy. For more - # information, see [kms:EncryptionAlgorithm][2], [kms:MacAlgorithm][3] - # or [kms:Signing Algorithm][4] in the Key Management Service - # Developer Guide . + # or an asymmetric key pair. It also determines the cryptographic + # algorithms that the KMS key supports. You can't change the `KeySpec` + # after the KMS key is created. To further restrict the algorithms that + # can be used with the KMS key, use a condition key in its key policy or + # IAM policy. 
For more information, see [kms:EncryptionAlgorithm][2],
+ # [kms:MacAlgorithm][3] or [kms:Signing Algorithm][4] in the Key
+ # Management Service Developer Guide .
#
# [Amazon Web Services services that are integrated with KMS][5] use
# symmetric encryption KMS keys to protect your data. These services do
@@ -1501,9 +1513,10 @@ def create_grant(params = {}, options = {})
# This value creates a *primary key*, not a replica. To create a
# *replica key*, use the ReplicateKey operation.
#
- # You can create a symmetric or asymmetric multi-Region key, and you can
- # create a multi-Region key with imported key material. However, you
- # cannot create a multi-Region key in a custom key store.
+ # You can create a multi-Region version of a symmetric encryption KMS
+ # key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with
+ # imported key material. However, you cannot create a multi-Region key
+ # in a custom key store.
#
#
#
@@ -1833,10 +1846,10 @@ def create_key(params = {}, options = {})
#
# The `Decrypt` operation also decrypts ciphertext that was encrypted
# outside of KMS by the public key in an KMS asymmetric KMS key.
- # However, it cannot decrypt symmetric ciphertext produced by other
- # libraries, such as the [Amazon Web Services Encryption SDK][2] or
- # [Amazon S3 client-side encryption][3]. These libraries return a
- # ciphertext format that is incompatible with KMS.
+ # However, it cannot decrypt ciphertext produced by other libraries,
+ # such as the [Amazon Web Services Encryption SDK][2] or [Amazon S3
+ # client-side encryption][3]. These libraries return a ciphertext format
+ # that is incompatible with KMS.
#
# If the ciphertext was encrypted under a symmetric encryption KMS key,
# the `KeyId` parameter is optional. KMS can get this information from
@@ -2802,23 +2815,37 @@ def disable_key(params = {}, options = {})
req.send_request(options)
end
- # Disables [automatic rotation of the key material][1] for the specified
+ # Disables [automatic rotation of the key material][1] of the specified
# symmetric encryption KMS key.
#
- # You cannot enable automatic rotation of [asymmetric KMS keys][2],
- # [HMAC KMS keys][3], KMS keys with [imported key material][4], or KMS
- # keys in a [custom key store][5]. To enable or disable automatic
- # rotation of a set of related [multi-Region keys][6], set the property
- # on the primary key.
+ # Automatic key rotation is supported only on symmetric encryption KMS
+ # keys. You cannot enable or disable automatic rotation of [asymmetric
+ # KMS keys][2], [HMAC KMS keys][3], KMS keys with [imported key
+ # material][4], or KMS keys in a [custom key store][5]. The key rotation
+ # status of these KMS keys is always `false`. To enable or disable
+ # automatic rotation of a set of related [multi-Region keys][6], set the
+ # property on the primary key.
+ #
+ # You can enable (EnableKeyRotation) and disable automatic rotation of
+ # the key material in [customer managed KMS keys][7]. Key material
+ # rotation of [Amazon Web Services managed KMS keys][8] is not
+ # configurable. KMS always rotates the key material every year.
+ # Rotation of [Amazon Web Services owned KMS keys][9] varies.
+ #
+ # In May 2022, KMS changed the rotation schedule for Amazon Web Services
+ # managed keys from every three years to every year. For details, see
+ # EnableKeyRotation.
+ #
+ #
#
# The KMS key that you use for this operation must be in a compatible
- # key state.
For details, see [Key states of KMS keys][10] in the *Key
# Management Service Developer Guide*.
#
# **Cross-account use**\: No. You cannot perform this operation on a KMS
# key in a different Amazon Web Services account.
#
- # **Required permissions**\: [kms:DisableKeyRotation][8] (key policy)
+ # **Required permissions**\: [kms:DisableKeyRotation][11] (key policy)
#
# **Related operations:**
#
@@ -2834,8 +2861,11 @@ def disable_key(params = {}, options = {})
# [4]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html
# [5]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html
# [6]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate
- # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
- # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
+ # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk
+ # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
+ # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html
#
# @option params [required, String] :key_id
# Identifies a symmetric encryption KMS key. You cannot enable or
@@ -3035,23 +3065,49 @@ def enable_key(params = {}, options = {})
req.send_request(options)
end
- # Enables [automatic rotation of the key material][1] for the specified
+ # Enables [automatic rotation of the key material][1] of the specified
# symmetric encryption KMS key.
#
- # You cannot enable automatic rotation of [asymmetric KMS keys][2],
- # [HMAC KMS keys][3], KMS keys with [imported key material][4], or KMS
- # keys in a [custom key store][5]. To enable or disable automatic
- # rotation of a set of related [multi-Region keys][6], set the property
- # on the primary key.
+ # When you enable automatic rotation of a [customer managed KMS key][2],
+ # KMS rotates the key material of the KMS key one year (approximately
+ # 365 days) from the enable date and every year thereafter. You can
+ # monitor rotation of the key material for your KMS keys in CloudTrail
+ # and Amazon CloudWatch. To disable rotation of the key material in a
+ # customer managed KMS key, use the DisableKeyRotation operation.
+ #
+ # Automatic key rotation is supported only on [symmetric encryption KMS
+ # keys][3]. You cannot enable or disable automatic rotation of
+ # [asymmetric KMS keys][4], [HMAC KMS keys][5], KMS keys with [imported
+ # key material][6], or KMS keys in a [custom key store][7]. The key
+ # rotation status of these KMS keys is always `false`. To enable or
+ # disable automatic rotation of a set of related [multi-Region keys][8],
+ # set the property on the primary key.
+ #
+ # You cannot enable or disable automatic rotation of [Amazon Web Services
+ # managed KMS keys][9]. KMS always rotates the key material of Amazon
+ # Web Services managed keys every year. Rotation of [Amazon Web Services
+ # owned KMS keys][10] varies.
+ #
+ # In May 2022, KMS changed the rotation schedule for Amazon Web Services
+ # managed keys from every three years (approximately 1,095 days) to
+ # every year (approximately 365 days).
+ # + # New Amazon Web Services managed keys are automatically rotated one + # year after they are created, and approximately every year thereafter. + # + # Existing Amazon Web Services managed keys are automatically rotated + # one year after their most recent rotation, and every year thereafter. + # + # # # The KMS key that you use for this operation must be in a compatible - # key state. For details, see [Key states of KMS keys][7] in the *Key + # key state. For details, see [Key states of KMS keys][11] in the *Key # Management Service Developer Guide*. # # **Cross-account use**\: No. You cannot perform this operation on a KMS # key in a different Amazon Web Services account. # - # **Required permissions**\: [kms:EnableKeyRotation][8] (key policy) + # **Required permissions**\: [kms:EnableKeyRotation][12] (key policy) # # **Related operations:** # @@ -3062,20 +3118,25 @@ def enable_key(params = {}, options = {}) # # # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html - # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html - # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html - # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html - # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate - # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html - # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk + # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [12]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html # # @option params [required, String] :key_id - # Identifies a symmetric encryption KMS key. You cannot enable automatic - # rotation of [asymmetric KMS keys][1], [HMAC KMS keys][2], KMS keys - # with [imported key material][3], or KMS keys in a [custom key - # store][4]. To enable or disable automatic rotation of a set of related - # [multi-Region keys][5], set the property on the primary key. + # Identifies a symmetric encryption KMS key. You cannot enable or + # disable automatic rotation of [asymmetric KMS keys][1], [HMAC KMS + # keys][2], KMS keys with [imported key material][3], or KMS keys in a + # [custom key store][4]. The key rotation status of these KMS keys is + # always `false`. To enable or disable automatic rotation of a set of + # related [multi-Region keys][5], set the property on the primary key. # # Specify the key ID or key ARN of the KMS key. 
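As a rough illustration of the rotation behavior documented above, the relevant client calls in this gem can be exercised as in the following minimal sketch; the region and key ID are placeholders, and a configured credential chain is assumed:

    require 'aws-sdk-kms'

    kms = Aws::KMS::Client.new(region: 'us-east-1')
    key_id = '1234abcd-12ab-34cd-56ef-1234567890ab' # placeholder customer managed, symmetric encryption key

    # Turn on annual rotation of the key material (customer managed keys only).
    kms.enable_key_rotation(key_id: key_id)

    # Check the rotation status; AWS managed keys always report true.
    status = kms.get_key_rotation_status(key_id: key_id)
    puts status.key_rotation_enabled # => true

    # Rotation can be switched off again for customer managed keys.
    kms.disable_key_rotation(key_id: key_id)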
#
@@ -3948,16 +4009,17 @@ def generate_data_key_pair_without_plaintext(params = {}, options = {})
#
# This operation is useful for systems that need to encrypt data at some
# point, but not immediately. When you need to encrypt the data, you
- # call the Decrypt operation on the encrypted copy of the key. It's
- # also useful in distributed systems with different levels of trust. For
- # example, you might store encrypted data in containers. One component
- # of your system creates new containers and stores an encrypted data key
- # with each container. Then, a different component puts the data into
- # the containers. That component first decrypts the data key, uses the
- # plaintext data key to encrypt data, puts the encrypted data into the
- # container, and then destroys the plaintext data key. In this system,
- # the component that creates the containers never sees the plaintext
- # data key.
+ # call the Decrypt operation on the encrypted copy of the key.
+ #
+ # It's also useful in distributed systems with different levels of
+ # trust. For example, you might store encrypted data in containers. One
+ # component of your system creates new containers and stores an
+ # encrypted data key with each container. Then, a different component
+ # puts the data into the containers. That component first decrypts the
+ # data key, uses the plaintext data key to encrypt data, puts the
+ # encrypted data into the container, and then destroys the plaintext
+ # data key. In this system, the component that creates the containers
+ # never sees the plaintext data key.
#
# To request an asymmetric data key pair, use the GenerateDataKeyPair or
# GenerateDataKeyPairWithoutPlaintext operations.
@@ -4135,6 +4197,15 @@ def generate_data_key_without_plaintext(params = {}, options = {})
# see [HMAC keys in KMS][2] in the Key Management Service
# Developer Guide .
#
+ # Best practices recommend that you limit the time during which any
+ # signing mechanism, including an HMAC, is effective. This deters an
+ # attack where the actor uses a signed message to establish validity
+ # repeatedly or long after the message is superseded. HMAC tags do not
+ # include a timestamp, but you can include a timestamp in the token or
+ # message to help you detect when it's time to refresh the HMAC.
+ #
+ #
+ #
# The KMS key that you use for this operation must be in a compatible
# key state. For details, see [Key states of KMS keys][3] in the *Key
# Management Service Developer Guide*.
@@ -4391,31 +4462,56 @@ def get_key_policy(params = {}, options = {})
# Gets a Boolean value that indicates whether [automatic rotation of the
# key material][1] is enabled for the specified KMS key.
#
- # You cannot enable automatic rotation of [asymmetric KMS keys][2],
- # [HMAC KMS keys][3], KMS keys with [imported key material][4], or KMS
- # keys in a [custom key store][5]. To enable or disable automatic
- # rotation of a set of related [multi-Region keys][6], set the property
- # on the primary key. The key rotation status for these KMS keys is
- # always `false`.
+ # When you enable automatic rotation for [customer managed KMS keys][2],
+ # KMS rotates the key material of the KMS key one year (approximately
+ # 365 days) from the enable date and every year thereafter. You can
+ # monitor rotation of the key material for your KMS keys in CloudTrail
+ # and Amazon CloudWatch.
+ #
+ # Automatic key rotation is supported only on [symmetric encryption KMS
+ # keys][3].
You cannot enable or disable automatic rotation of
+ # [asymmetric KMS keys][4], [HMAC KMS keys][5], KMS keys with [imported
+ # key material][6], or KMS keys in a [custom key store][7]. The key
+ # rotation status of these KMS keys is always `false`. To enable or
+ # disable automatic rotation of a set of related [multi-Region keys][8],
+ # set the property on the primary key.
+ #
+ # You can enable (EnableKeyRotation) and disable automatic rotation
+ # (DisableKeyRotation) of the key material in customer managed KMS keys.
+ # Key material rotation of [Amazon Web Services managed KMS keys][9] is
+ # not configurable. KMS always rotates the key material in Amazon Web
+ # Services managed KMS keys every year. The key rotation status for
+ # Amazon Web Services managed KMS keys is always `true`.
+ #
+ # In May 2022, KMS changed the rotation schedule for Amazon Web Services
+ # managed keys from every three years to every year. For details, see
+ # EnableKeyRotation.
+ #
+ #
#
# The KMS key that you use for this operation must be in a compatible
- # key state. For details, see [Key states of KMS keys][10] in the *Key
# Management Service Developer Guide*.
#
# * Disabled: The key rotation status does not change when you disable a
# KMS key. However, while the KMS key is disabled, KMS does not rotate
- # the key material.
+ # the key material. When you re-enable the KMS key, rotation resumes.
+ # If the key material in the re-enabled KMS key hasn't been rotated
+ # in one year, KMS rotates it immediately, and every year thereafter.
+ # If it's been less than a year since the key material in the
+ # re-enabled KMS key was rotated, the KMS key resumes its prior
+ # rotation schedule.
#
# * Pending deletion: While a KMS key is pending deletion, its key
# rotation status is `false` and KMS does not rotate the key material.
- # If you cancel the deletion, the original key rotation status is
- # restored.
+ # If you cancel the deletion, the original key rotation status returns
+ # to `true`.
#
# **Cross-account use**\: Yes. To perform this operation on a KMS key in
# a different Amazon Web Services account, specify the key ARN in the
# value of the `KeyId` parameter.
# - # **Required permissions**\: [kms:GetKeyRotationStatus][8] (key policy) + # **Required permissions**\: [kms:GetKeyRotationStatus][11] (key policy) # # **Related operations:** # @@ -4426,13 +4522,16 @@ def get_key_policy(params = {}, options = {}) # # # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html - # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html - # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html - # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html - # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate - # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html - # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks + # [4]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html + # [5]: https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html + # [6]: https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html + # [7]: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html + # [8]: https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate + # [9]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + # [10]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html + # [11]: https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html # # @option params [required, String] :key_id # Gets the rotation status for the specified KMS key. @@ -5811,15 +5910,25 @@ def list_retirable_grants(params = {}, options = {}) # visible][2] in the *Amazon Web Services Identity and Access # Management User Guide*. # - # The key policy cannot exceed 32 kilobytes (32768 bytes). For more - # information, see [Resource Quotas][3] in the *Key Management Service - # Developer Guide*. + # A key policy document must conform to the following rules. + # + # * Up to 32 kilobytes (32768 bytes) + # + # * Must be UTF-8 encoded + # + # * The only Unicode characters that are permitted in a key policy + # document are the horizontal tab (U+0009), linefeed (U+000A), + # carriage return (U+000D), and characters in the range U+0020 to + # U+00FF. + # + # * The `Sid` element in a key policy statement can include spaces. + # (Spaces are prohibited in the `Sid` element of an IAM policy + # document.) # # # # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency - # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/resource-limits.html # # @option params [Boolean] :bypass_policy_lockout_safety_check # A flag to indicate whether to bypass the key policy lockout safety @@ -6357,7 +6466,20 @@ def re_encrypt(params = {}, options = {}) # visible][3] in the Identity and Access Management User # Guide . # - # * The key policy size quota is 32 kilobytes (32768 bytes). + # A key policy document must conform to the following rules. 
+ #
+ # * Up to 32 kilobytes (32768 bytes)
+ #
+ # * Must be UTF-8 encoded
+ #
+ # * The only Unicode characters that are permitted in a key policy
+ # document are the horizontal tab (U+0009), linefeed (U+000A),
+ # carriage return (U+000D), and characters in the range U+0020 to
+ # U+00FF.
+ #
+ # * The `Sid` element in a key policy statement can include spaces.
+ # (Spaces are prohibited in the `Sid` element of an IAM policy
+ # document.)
#
#
@@ -6814,7 +6936,7 @@ def revoke_grant(params = {}, options = {})
# The waiting period, specified in number of days. After the waiting
# period ends, KMS deletes the KMS key.
#
- # If the KMS key is a multi-Region primary key with replicas, the
+ # If the KMS key is a multi-Region primary key with replica keys, the
# waiting period begins when the last of its replica keys is deleted.
# Otherwise, the waiting period begins immediately.
#
@@ -6900,6 +7022,15 @@ def schedule_key_deletion(params = {}, options = {})
# When signing a message, be sure to record the KMS key and the signing
# algorithm. This information is required to verify the signature.
#
+ # Best practices recommend that you limit the time during which any
+ # signature is effective. This deters an attack where the actor uses a
+ # signed message to establish validity repeatedly or long after the
+ # message is superseded. Signatures do not include a timestamp, but you
+ # can include a timestamp in the signed message to help you detect when
+ # it's time to refresh the signature.
+ #
+ #
+ #
# To verify the signature that this operation generates, use the Verify
# operation. Or use the GetPublicKey operation to download the public
# key and then use the public key to verify the signature outside of
@@ -8033,7 +8164,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-kms'
- context[:gem_version] = '1.56.0'
+ context[:gem_version] = '1.57.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-kms/lib/aws-sdk-kms/types.rb b/gems/aws-sdk-kms/lib/aws-sdk-kms/types.rb
index 4b425490de6..131e9b7ebdc 100644
--- a/gems/aws-sdk-kms/lib/aws-sdk-kms/types.rb
+++ b/gems/aws-sdk-kms/lib/aws-sdk-kms/types.rb
@@ -642,16 +642,19 @@ class CreateGrantResponse < Struct.new(
# }
#
# @!attribute [rw] policy
- # The key policy to attach to the KMS key.
+ # The key policy to attach to the KMS key. If you do not specify a key
+ # policy, KMS attaches a default key policy to the KMS key. For more
+ # information, see [Default key policy][1] in the *Key Management
+ # Service Developer Guide*.
#
# If you provide a key policy, it must meet the following criteria:
#
- # * If you don't set `BypassPolicyLockoutSafetyCheck` to true, the
+ # * If you don't set `BypassPolicyLockoutSafetyCheck` to `True`, the
# key policy must allow the principal that is making the `CreateKey`
# request to make a subsequent PutKeyPolicy request on the KMS key.
# This reduces the risk that the KMS key becomes unmanageable. For
# more information, refer to the scenario in the [Default Key
- # Policy][1] section of the Key Management Service Developer
+ # Policy][2] section of the Key Management Service Developer
# Guide .
#
# * Each statement in the key policy must contain one or more
@@ -661,14 +664,23 @@ class CreateGrantResponse < Struct.new(
# enforce a delay before including the new principal in a key policy
# because the new principal might not be immediately visible to KMS.
# For more information, see [Changes that I make are not always - # immediately visible][2] in the *Amazon Web Services Identity and + # immediately visible][3] in the *Amazon Web Services Identity and # Access Management User Guide*. # - # If you do not provide a key policy, KMS attaches a default key - # policy to the KMS key. For more information, see [Default Key - # Policy][3] in the *Key Management Service Developer Guide*. + # A key policy document must conform to the following rules. + # + # * Up to 32 kilobytes (32768 bytes) + # + # * Must be UTF-8 encoded + # + # * The only Unicode characters that are permitted in a key policy + # document are the horizontal tab (U+0009), linefeed (U+000A), + # carriage return (U+000D), and characters in the range U+0020 to + # U+00FF. # - # The key policy size quota is 32 kilobytes (32768 bytes). + # * The `Sid` element in a key policy statement can include spaces. + # (Spaces are prohibited in the `Sid` element of an IAM policy + # document.) # # For help writing and formatting a JSON policy document, see the [IAM # JSON Policy Reference][4] in the Identity and Access @@ -676,9 +688,9 @@ class CreateGrantResponse < Struct.new( # # # - # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam - # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency - # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default + # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default + # [2]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam + # [3]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency # [4]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html # @return [String] # @@ -735,13 +747,14 @@ class CreateGrantResponse < Struct.new( # Management Service Developer Guide . # # The `KeySpec` determines whether the KMS key contains a symmetric - # key or an asymmetric key pair. It also determines the algorithms - # that the KMS key supports. You can't change the `KeySpec` after the - # KMS key is created. To further restrict the algorithms that can be - # used with the KMS key, use a condition key in its key policy or IAM - # policy. For more information, see [kms:EncryptionAlgorithm][2], - # [kms:MacAlgorithm][3] or [kms:Signing Algorithm][4] in the - # Key Management Service Developer Guide . + # key or an asymmetric key pair. It also determines the cryptographic + # algorithms that the KMS key supports. You can't change the + # `KeySpec` after the KMS key is created. To further restrict the + # algorithms that can be used with the KMS key, use a condition key in + # its key policy or IAM policy. For more information, see + # [kms:EncryptionAlgorithm][2], [kms:MacAlgorithm][3] or [kms:Signing + # Algorithm][4] in the Key Management Service Developer + # Guide . # # [Amazon Web Services services that are integrated with KMS][5] use # symmetric encryption KMS keys to protect your data. These services @@ -924,9 +937,10 @@ class CreateGrantResponse < Struct.new( # This value creates a *primary key*, not a replica. To create a # *replica key*, use the ReplicateKey operation. # - # You can create a symmetric or asymmetric multi-Region key, and you - # can create a multi-Region key with imported key material. 
However, - # you cannot create a multi-Region key in a custom key store. + # You can create a multi-Region version of a symmetric encryption KMS + # key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with + # imported key material. However, you cannot create a multi-Region key + # in a custom key store. # # # @@ -1709,10 +1723,11 @@ class EnableKeyRequest < Struct.new( # } # # @!attribute [rw] key_id - # Identifies a symmetric encryption KMS key. You cannot enable - # automatic rotation of [asymmetric KMS keys][1], [HMAC KMS keys][2], - # KMS keys with [imported key material][3], or KMS keys in a [custom - # key store][4]. To enable or disable automatic rotation of a set of + # Identifies a symmetric encryption KMS key. You cannot enable or + # disable automatic rotation of [asymmetric KMS keys][1], [HMAC KMS + # keys][2], KMS keys with [imported key material][3], or KMS keys in a + # [custom key store][4]. The key rotation status of these KMS keys is + # always `false`. To enable or disable automatic rotation of a set of # related [multi-Region keys][5], set the property on the primary key. # # Specify the key ID or key ARN of the KMS key. @@ -3091,8 +3106,8 @@ class ImportKeyMaterialRequest < Struct.new( class ImportKeyMaterialResponse < Aws::EmptyStructure; end # The request was rejected because the specified KMS key cannot decrypt - # the data. The `KeyId` in a `Decrypt` request and the `SourceKeyId` in - # a `ReEncrypt` request must identify the same KMS key that was used to + # the data. The `KeyId` in a Decrypt request and the `SourceKeyId` in a + # ReEncrypt request must identify the same KMS key that was used to # encrypt the ciphertext. # # @!attribute [rw] message @@ -4226,15 +4241,25 @@ class NotFoundException < Struct.new( # immediately visible][2] in the *Amazon Web Services Identity and # Access Management User Guide*. # - # The key policy cannot exceed 32 kilobytes (32768 bytes). For more - # information, see [Resource Quotas][3] in the *Key Management Service - # Developer Guide*. + # A key policy document must conform to the following rules. + # + # * Up to 32 kilobytes (32768 bytes) + # + # * Must be UTF-8 encoded + # + # * The only Unicode characters that are permitted in a key policy + # document are the horizontal tab (U+0009), linefeed (U+000A), + # carriage return (U+000D), and characters in the range U+0020 to + # U+00FF. + # + # * The `Sid` element in a key policy statement can include spaces. + # (Spaces are prohibited in the `Sid` element of an IAM policy + # document.) # # # # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam # [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency - # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/resource-limits.html # @return [String] # # @!attribute [rw] bypass_policy_lockout_safety_check @@ -4596,7 +4621,20 @@ class ReEncryptResponse < Struct.new( # immediately visible][3] in the Identity and Access # Management User Guide . # - # * The key policy size quota is 32 kilobytes (32768 bytes). + # A key policy document must conform to the following rules. + # + # * Up to 32 kilobytes (32768 bytes) + # + # * Must be UTF-8 encoded + # + # * The only Unicode characters that are permitted in a key policy + # document are the horizontal tab (U+0009), linefeed (U+000A), + # carriage return (U+000D), and characters in the range U+0020 to + # U+00FF. 
+ # + # * The `Sid` element in a key policy statement can include spaces. + # (Spaces are prohibited in the `Sid` element of an IAM policy + # document.) # # # @@ -4840,7 +4878,7 @@ class RevokeGrantRequest < Struct.new( # The waiting period, specified in number of days. After the waiting # period ends, KMS deletes the KMS key. # - # If the KMS key is a multi-Region primary key with replicas, the + # If the KMS key is a multi-Region primary key with replica keys, the # waiting period begins when the last of its replica keys is deleted. # Otherwise, the waiting period begins immediately. #
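As an illustrative sketch of the waiting-period behavior described in the ScheduleKeyDeletion documentation above (the key ID is a placeholder; region and credentials are assumed to be configured):

    require 'aws-sdk-kms'

    kms = Aws::KMS::Client.new(region: 'us-east-1')

    # Schedule deletion with the shortest allowed waiting period (7-30 days, default 30).
    # For a multi-Region primary key with replica keys, the waiting period only starts
    # once the last replica key has been deleted.
    resp = kms.schedule_key_deletion(
      key_id: '1234abcd-12ab-34cd-56ef-1234567890ab', # placeholder key ID or key ARN
      pending_window_in_days: 7
    )
    puts resp.deletion_date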