From fb4a680b277c0c95715100c9ae2a43ababc1b215 Mon Sep 17 00:00:00 2001 From: Serge Smertin Date: Mon, 15 Jun 2020 00:40:23 +0200 Subject: [PATCH 01/13] go-fmt cleanup --- databricks/mounts_test.go | 24 ++++----- ...source_databricks_azure_adls_gen1_mount.go | 2 +- ...source_databricks_azure_adls_gen2_mount.go | 8 +-- .../resource_databricks_azure_blob_mount.go | 2 +- .../resource_databricks_job_aws_test.go | 52 +++++++++---------- .../resource_databricks_job_azure_test.go | 34 ++++++------ 6 files changed, 61 insertions(+), 61 deletions(-) diff --git a/databricks/mounts_test.go b/databricks/mounts_test.go index ac6c2d7f0..a8dd4f7eb 100644 --- a/databricks/mounts_test.go +++ b/databricks/mounts_test.go @@ -8,16 +8,16 @@ import ( func TestValidateMountDirectory(t *testing.T) { testCases := []struct { - directory string - errorCount int - }{ - {"", 0}, - {"/directory", 0}, - {"directory", 1}, - } - for _, tc := range testCases { - _, errs := ValidateMountDirectory(tc.directory, "key") - - assert.Lenf(t, errs, tc.errorCount, "directory '%s' does not generate the expected error count", tc.directory) - } + directory string + errorCount int + }{ + {"", 0}, + {"/directory", 0}, + {"directory", 1}, + } + for _, tc := range testCases { + _, errs := ValidateMountDirectory(tc.directory, "key") + + assert.Lenf(t, errs, tc.errorCount, "directory '%s' does not generate the expected error count", tc.directory) + } } diff --git a/databricks/resource_databricks_azure_adls_gen1_mount.go b/databricks/resource_databricks_azure_adls_gen1_mount.go index 4d6d28bd6..ee798a46f 100644 --- a/databricks/resource_databricks_azure_adls_gen1_mount.go +++ b/databricks/resource_databricks_azure_adls_gen1_mount.go @@ -39,7 +39,7 @@ func resourceAzureAdlsGen1Mount() *schema.Resource { Optional: true, Computed: true, //Default: "/", - ForceNew: true, + ForceNew: true, ValidateFunc: ValidateMountDirectory, }, "mount_name": { diff --git a/databricks/resource_databricks_azure_adls_gen2_mount.go b/databricks/resource_databricks_azure_adls_gen2_mount.go index 36388f7e7..4991dc836 100644 --- a/databricks/resource_databricks_azure_adls_gen2_mount.go +++ b/databricks/resource_databricks_azure_adls_gen2_mount.go @@ -32,10 +32,10 @@ func resourceAzureAdlsGen2Mount() *schema.Resource { ForceNew: true, }, "directory": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, ValidateFunc: ValidateMountDirectory, }, "mount_name": { diff --git a/databricks/resource_databricks_azure_blob_mount.go b/databricks/resource_databricks_azure_blob_mount.go index d9b6e9ba7..ac4143390 100644 --- a/databricks/resource_databricks_azure_blob_mount.go +++ b/databricks/resource_databricks_azure_blob_mount.go @@ -37,7 +37,7 @@ func resourceAzureBlobMount() *schema.Resource { Optional: true, Computed: true, //Default: "/", - ForceNew: true, + ForceNew: true, ValidateFunc: ValidateMountDirectory, }, "mount_name": { diff --git a/databricks/resource_databricks_job_aws_test.go b/databricks/resource_databricks_job_aws_test.go index 972e08cea..e423ed498 100644 --- a/databricks/resource_databricks_job_aws_test.go +++ b/databricks/resource_databricks_job_aws_test.go @@ -114,30 +114,30 @@ func testAwsJobValuesNewCluster(t *testing.T, job *model.Job) resource.TestCheck } func testAwsJobResourceNewCluster() string { - return fmt.Sprintf(` - resource "databricks_job" "my_job" { - new_cluster { - autoscale { - min_workers = 2 - max_workers = 3 - } - spark_version = 
"6.4.x-scala2.11" - aws_attributes { - availability = "SPOT" - zone_id = "us-east-1a" - spot_bid_price_percent = "100" - first_on_demand = 1 - ebs_volume_type = "GENERAL_PURPOSE_SSD" - ebs_volume_count = 1 - ebs_volume_size = 32 - } - node_type_id = "r3.xlarge" - } - notebook_path = "/Users/jane.doe@databricks.com/my-demo-notebook" - name = "my-demo-notebook" - timeout_seconds = 3600 - max_retries = 1 - max_concurrent_runs = 1 - } - `) + return ` + resource "databricks_job" "my_job" { + new_cluster { + autoscale { + min_workers = 2 + max_workers = 3 + } + spark_version = "6.4.x-scala2.11" + aws_attributes { + availability = "SPOT" + zone_id = "us-east-1a" + spot_bid_price_percent = "100" + first_on_demand = 1 + ebs_volume_type = "GENERAL_PURPOSE_SSD" + ebs_volume_count = 1 + ebs_volume_size = 32 + } + node_type_id = "r3.xlarge" + } + notebook_path = "/Users/jane.doe@databricks.com/my-demo-notebook" + name = "my-demo-notebook" + timeout_seconds = 3600 + max_retries = 1 + max_concurrent_runs = 1 + } + ` } diff --git a/databricks/resource_databricks_job_azure_test.go b/databricks/resource_databricks_job_azure_test.go index 2abab1ec0..58712e442 100644 --- a/databricks/resource_databricks_job_azure_test.go +++ b/databricks/resource_databricks_job_azure_test.go @@ -106,21 +106,21 @@ func testAzureJobValuesNewCluster(t *testing.T, job *model.Job) resource.TestChe } func testAzureJobResourceNewCluster() string { - return fmt.Sprintf(` - resource "databricks_job" "my_job" { - new_cluster { - autoscale { - min_workers = 2 - max_workers = 3 - } - spark_version = "6.4.x-scala2.11" - node_type_id = "Standard_DS3_v2" - } - notebook_path = "/Users/jane.doe@databricks.com/my-demo-notebook" - name = "my-demo-notebook" - timeout_seconds = 3600 - max_retries = 1 - max_concurrent_runs = 1 - } - `) + return ` + resource "databricks_job" "my_job" { + new_cluster { + autoscale { + min_workers = 2 + max_workers = 3 + } + spark_version = "6.4.x-scala2.11" + node_type_id = "Standard_DS3_v2" + } + notebook_path = "/Users/jane.doe@databricks.com/my-demo-notebook" + name = "my-demo-notebook" + timeout_seconds = 3600 + max_retries = 1 + max_concurrent_runs = 1 + } + ` } From fdb9b3d05414f635818e21b6c6a2d6b5b8b58ad1 Mon Sep 17 00:00:00 2001 From: Serge Smertin Date: Mon, 15 Jun 2020 00:57:04 +0200 Subject: [PATCH 02/13] Make linter happy & remove redundant &schema.Schema --- .../data_source_databricks_dbfs_file.go | 8 +- .../data_source_databricks_dbfs_file_paths.go | 6 +- ...ta_source_databricks_default_user_roles.go | 4 +- databricks/data_source_databricks_notebook.go | 12 +-- .../data_source_databricks_notebook_paths.go | 6 +- databricks/data_source_databricks_zones.go | 4 +- databricks/provider.go | 16 ++-- .../resource_databricks_aws_s3_mount.go | 2 +- ...source_databricks_azure_adls_gen1_mount.go | 2 +- ...source_databricks_azure_adls_gen2_mount.go | 2 +- .../resource_databricks_azure_blob_mount.go | 2 +- databricks/resource_databricks_cluster.go | 58 +++++------ databricks/resource_databricks_dbfs_file.go | 12 +-- .../resource_databricks_dbfs_file_sync.go | 12 +-- .../resource_databricks_instance_pool.go | 24 ++--- .../resource_databricks_instance_profile.go | 4 +- databricks/resource_databricks_job.go | 96 +++++++++---------- .../resource_databricks_mws_credentials.go | 6 +- .../resource_databricks_mws_networks.go | 2 +- ...e_databricks_mws_storage_configurations.go | 4 +- .../resource_databricks_mws_workspaces.go | 12 +-- databricks/resource_databricks_notebook.go | 16 ++-- 
databricks/resource_databricks_scim_group.go | 10 +- databricks/resource_databricks_scim_user.go | 14 +-- databricks/resource_databricks_secret.go | 8 +- databricks/resource_databricks_secret_acl.go | 6 +- .../resource_databricks_secret_scope.go | 6 +- databricks/resource_databricks_token.go | 10 +- 28 files changed, 182 insertions(+), 182 deletions(-) diff --git a/databricks/data_source_databricks_dbfs_file.go b/databricks/data_source_databricks_dbfs_file.go index 8c6ef6bd9..143f6d541 100644 --- a/databricks/data_source_databricks_dbfs_file.go +++ b/databricks/data_source_databricks_dbfs_file.go @@ -9,22 +9,22 @@ func dataSourceDBFSFile() *schema.Resource { return &schema.Resource{ Read: dataSourceDBFSFileRead, Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ + "path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "limit_file_size": &schema.Schema{ + "limit_file_size": { Type: schema.TypeBool, Required: true, ForceNew: true, }, - "content": &schema.Schema{ + "content": { Type: schema.TypeString, Computed: true, ForceNew: true, }, - "file_size": &schema.Schema{ + "file_size": { Type: schema.TypeInt, Computed: true, }, diff --git a/databricks/data_source_databricks_dbfs_file_paths.go b/databricks/data_source_databricks_dbfs_file_paths.go index 70738ff26..bfe097373 100644 --- a/databricks/data_source_databricks_dbfs_file_paths.go +++ b/databricks/data_source_databricks_dbfs_file_paths.go @@ -9,17 +9,17 @@ func dataSourceDBFSFilePaths() *schema.Resource { return &schema.Resource{ Read: dataSourceDBFSFilePathsRead, Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ + "path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "recursive": &schema.Schema{ + "recursive": { Type: schema.TypeBool, Required: true, ForceNew: true, }, - "path_list": &schema.Schema{ + "path_list": { Type: schema.TypeSet, Computed: true, Elem: &schema.Resource{ diff --git a/databricks/data_source_databricks_default_user_roles.go b/databricks/data_source_databricks_default_user_roles.go index 059def6eb..2a5b2b277 100644 --- a/databricks/data_source_databricks_default_user_roles.go +++ b/databricks/data_source_databricks_default_user_roles.go @@ -25,12 +25,12 @@ func dataSourceDefaultUserRoles() *schema.Resource { return err }, Schema: map[string]*schema.Schema{ - "default_username": &schema.Schema{ + "default_username": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "roles": &schema.Schema{ + "roles": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/databricks/data_source_databricks_notebook.go b/databricks/data_source_databricks_notebook.go index 33115e895..67f0380e3 100644 --- a/databricks/data_source_databricks_notebook.go +++ b/databricks/data_source_databricks_notebook.go @@ -12,12 +12,12 @@ func dataSourceNotebook() *schema.Resource { Read: dataSourceNotebookRead, Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ + "path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "format": &schema.Schema{ + "format": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -27,19 +27,19 @@ func dataSourceNotebook() *schema.Resource { string(model.HTML), }, false), }, - "content": &schema.Schema{ + "content": { Type: schema.TypeString, Computed: true, }, - "language": &schema.Schema{ + "language": { Type: schema.TypeString, Computed: true, }, - "object_type": &schema.Schema{ + "object_type": { Type: schema.TypeString, Computed: true, }, - "object_id": &schema.Schema{ + 
"object_id": { Type: schema.TypeInt, Computed: true, }, diff --git a/databricks/data_source_databricks_notebook_paths.go b/databricks/data_source_databricks_notebook_paths.go index 5f18ea3a2..1a3b9548b 100644 --- a/databricks/data_source_databricks_notebook_paths.go +++ b/databricks/data_source_databricks_notebook_paths.go @@ -12,17 +12,17 @@ func dataSourceNotebookPaths() *schema.Resource { Read: dataSourceNotebookPathsRead, Schema: map[string]*schema.Schema{ - "path": &schema.Schema{ + "path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "recursive": &schema.Schema{ + "recursive": { Type: schema.TypeBool, Required: true, ForceNew: true, }, - "notebook_path_list": &schema.Schema{ + "notebook_path_list": { Type: schema.TypeSet, Computed: true, Elem: &schema.Resource{ diff --git a/databricks/data_source_databricks_zones.go b/databricks/data_source_databricks_zones.go index cf26ecb2f..3aa37f1dd 100644 --- a/databricks/data_source_databricks_zones.go +++ b/databricks/data_source_databricks_zones.go @@ -24,12 +24,12 @@ func dataSourceClusterZones() *schema.Resource { return err }, Schema: map[string]*schema.Schema{ - "default_zone": &schema.Schema{ + "default_zone": { Type: schema.TypeString, Computed: true, ForceNew: true, }, - "zones": &schema.Schema{ + "zones": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/databricks/provider.go b/databricks/provider.go index 5a9d42024..fc6432673 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -50,30 +50,30 @@ func Provider(version string) terraform.ResourceProvider { "databricks_mws_workspaces": resourceMWSWorkspaces(), }, Schema: map[string]*schema.Schema{ - "host": &schema.Schema{ + "host": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_HOST", nil), }, - "token": &schema.Schema{ + "token": { Type: schema.TypeString, Optional: true, Sensitive: true, DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_TOKEN", nil), ConflictsWith: []string{"basic_auth"}, }, - "basic_auth": &schema.Schema{ + "basic_auth": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "username": &schema.Schema{ + "username": { Type: schema.TypeString, Required: true, DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_USERNAME", nil), }, - "password": &schema.Schema{ + "password": { Type: schema.TypeString, Sensitive: true, Required: true, @@ -83,7 +83,7 @@ func Provider(version string) terraform.ResourceProvider { }, ConflictsWith: []string{"token"}, }, - "config_file": &schema.Schema{ + "config_file": { Type: schema.TypeString, Optional: true, DefaultFunc: schema.EnvDefaultFunc("DATABRICKS_CONFIG_FILE", "~/.databrickscfg"), @@ -92,14 +92,14 @@ func Provider(version string) terraform.ResourceProvider { "in ~/.databrickscfg. Check https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication for docs. Config\n" + "file credetials will only be used when host/token are not provided.", }, - "profile": &schema.Schema{ + "profile": { Type: schema.TypeString, Optional: true, Default: "DEFAULT", Description: "Connection profile specified within ~/.databrickscfg. 
Please check\n" + "https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles for documentation.", }, - "azure_auth": &schema.Schema{ + "azure_auth": { Type: schema.TypeMap, Optional: true, Elem: &schema.Resource{ diff --git a/databricks/resource_databricks_aws_s3_mount.go b/databricks/resource_databricks_aws_s3_mount.go index f7361d532..5a9b20bba 100644 --- a/databricks/resource_databricks_aws_s3_mount.go +++ b/databricks/resource_databricks_aws_s3_mount.go @@ -12,7 +12,7 @@ func resourceAWSS3Mount() *schema.Resource { Delete: resourceAWSS3Delete, Schema: map[string]*schema.Schema{ - "cluster_id": &schema.Schema{ + "cluster_id": { Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_azure_adls_gen1_mount.go b/databricks/resource_databricks_azure_adls_gen1_mount.go index ee798a46f..1218eeac7 100644 --- a/databricks/resource_databricks_azure_adls_gen1_mount.go +++ b/databricks/resource_databricks_azure_adls_gen1_mount.go @@ -17,7 +17,7 @@ func resourceAzureAdlsGen1Mount() *schema.Resource { Delete: resourceAzureAdlsGen1Delete, Schema: map[string]*schema.Schema{ - "cluster_id": &schema.Schema{ + "cluster_id": { Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_azure_adls_gen2_mount.go b/databricks/resource_databricks_azure_adls_gen2_mount.go index 4991dc836..0f75df9b7 100644 --- a/databricks/resource_databricks_azure_adls_gen2_mount.go +++ b/databricks/resource_databricks_azure_adls_gen2_mount.go @@ -16,7 +16,7 @@ func resourceAzureAdlsGen2Mount() *schema.Resource { Delete: resourceAzureAdlsGen2Delete, Schema: map[string]*schema.Schema{ - "cluster_id": &schema.Schema{ + "cluster_id": { Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_azure_blob_mount.go b/databricks/resource_databricks_azure_blob_mount.go index ac4143390..760f2d245 100644 --- a/databricks/resource_databricks_azure_blob_mount.go +++ b/databricks/resource_databricks_azure_blob_mount.go @@ -17,7 +17,7 @@ func resourceAzureBlobMount() *schema.Resource { Delete: resourceAzureBlobMountDelete, Schema: map[string]*schema.Schema{ - "cluster_id": &schema.Schema{ + "cluster_id": { Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_cluster.go b/databricks/resource_databricks_cluster.go index a22a87a06..15b092079 100644 --- a/databricks/resource_databricks_cluster.go +++ b/databricks/resource_databricks_cluster.go @@ -19,12 +19,12 @@ func resourceCluster() *schema.Resource { Delete: resourceClusterDelete, Schema: map[string]*schema.Schema{ - "num_workers": &schema.Schema{ + "num_workers": { Type: schema.TypeInt, Optional: true, ConflictsWith: []string{"autoscale"}, }, - "autoscale": &schema.Schema{ + "autoscale": { Type: schema.TypeSet, Optional: true, MaxItems: 1, @@ -43,20 +43,20 @@ func resourceCluster() *schema.Resource { }, ConflictsWith: []string{"num_workers"}, }, - "cluster_name": &schema.Schema{ + "cluster_name": { Type: schema.TypeString, Optional: true, Computed: true, }, - "spark_version": &schema.Schema{ + "spark_version": { Type: schema.TypeString, Required: true, }, - "spark_conf": &schema.Schema{ + "spark_conf": { Type: schema.TypeMap, Optional: true, }, - "aws_attributes": &schema.Schema{ + "aws_attributes": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -99,29 +99,29 @@ func resourceCluster() *schema.Resource { }, }, }, - "driver_node_type_id": &schema.Schema{ + "driver_node_type_id": { Type: 
schema.TypeString, Optional: true, Computed: true, ConflictsWith: []string{"instance_pool_id"}, }, - "node_type_id": &schema.Schema{ + "node_type_id": { Type: schema.TypeString, Optional: true, ConflictsWith: []string{"instance_pool_id"}, AtLeastOneOf: []string{"instance_pool_id"}, }, - "ssh_public_keys": &schema.Schema{ + "ssh_public_keys": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, // TODO: Validate less than 10 values }, - "custom_tags": &schema.Schema{ + "custom_tags": { Type: schema.TypeMap, Optional: true, }, - "cluster_log_conf": &schema.Schema{ + "cluster_log_conf": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -190,7 +190,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "init_scripts": &schema.Schema{ + "init_scripts": { Type: schema.TypeList, Optional: true, MaxItems: 10, @@ -251,7 +251,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "docker_image": &schema.Schema{ + "docker_image": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -282,33 +282,33 @@ func resourceCluster() *schema.Resource { }, }, }, - "spark_env_vars": &schema.Schema{ + "spark_env_vars": { Type: schema.TypeMap, Optional: true, }, - "autotermination_minutes": &schema.Schema{ + "autotermination_minutes": { Type: schema.TypeInt, Optional: true, Default: 60, //Computed: true, }, - "enable_elastic_disk": &schema.Schema{ + "enable_elastic_disk": { Type: schema.TypeBool, Optional: true, Computed: true, }, - "instance_pool_id": &schema.Schema{ + "instance_pool_id": { Type: schema.TypeString, Optional: true, ConflictsWith: []string{"node_type_id", "driver_node_type_id", "aws_attributes"}, AtLeastOneOf: []string{"node_type_id"}, }, - "idempotency_token": &schema.Schema{ + "idempotency_token": { Type: schema.TypeInt, Optional: true, ForceNew: true, }, - "library_jar": &schema.Schema{ + "library_jar": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -332,7 +332,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "library_egg": &schema.Schema{ + "library_egg": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -356,7 +356,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "library_whl": &schema.Schema{ + "library_whl": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -380,7 +380,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "library_pypi": &schema.Schema{ + "library_pypi": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -409,7 +409,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "library_maven": &schema.Schema{ + "library_maven": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -442,7 +442,7 @@ func resourceCluster() *schema.Resource { }, }, }, - "library_cran": &schema.Schema{ + "library_cran": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, @@ -467,23 +467,23 @@ func resourceCluster() *schema.Resource { }, }, }, - "cluster_id": &schema.Schema{ + "cluster_id": { Type: schema.TypeString, Computed: true, }, - "default_tags": &schema.Schema{ + "default_tags": { Type: schema.TypeMap, Computed: true, }, - "state": &schema.Schema{ + "state": { Type: schema.TypeString, Computed: true, }, - "state_message": &schema.Schema{ + "state_message": { Type: schema.TypeString, Computed: true, }, - "single_user_name": &schema.Schema{ + "single_user_name": { Type: schema.TypeString, Optional: true, }, diff --git 
a/databricks/resource_databricks_dbfs_file.go b/databricks/resource_databricks_dbfs_file.go index 1aa442b86..6cdaefadc 100644 --- a/databricks/resource_databricks_dbfs_file.go +++ b/databricks/resource_databricks_dbfs_file.go @@ -17,31 +17,31 @@ func resourceDBFSFile() *schema.Resource { Update: resourceDBFSFileUpdate, Schema: map[string]*schema.Schema{ - "content": &schema.Schema{ + "content": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "path": &schema.Schema{ + "path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "overwrite": &schema.Schema{ + "overwrite": { Type: schema.TypeBool, Optional: true, Default: false, }, - "mkdirs": &schema.Schema{ + "mkdirs": { Type: schema.TypeBool, Optional: true, Default: true, }, - "validate_remote_file": &schema.Schema{ + "validate_remote_file": { Type: schema.TypeBool, Optional: true, }, - "file_size": &schema.Schema{ + "file_size": { Type: schema.TypeInt, Computed: true, }, diff --git a/databricks/resource_databricks_dbfs_file_sync.go b/databricks/resource_databricks_dbfs_file_sync.go index 094012819..5143c0c73 100644 --- a/databricks/resource_databricks_dbfs_file_sync.go +++ b/databricks/resource_databricks_dbfs_file_sync.go @@ -15,31 +15,31 @@ func resourceDBFSFileSync() *schema.Resource { Update: resourceDBFSFileSyncUpdate, Schema: map[string]*schema.Schema{ - "src_path": &schema.Schema{ + "src_path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "tgt_path": &schema.Schema{ + "tgt_path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "file_size": &schema.Schema{ + "file_size": { Type: schema.TypeInt, Required: true, ForceNew: true, }, - "mkdirs": &schema.Schema{ + "mkdirs": { Type: schema.TypeBool, Optional: true, Default: true, }, - "host": &schema.Schema{ + "host": { Type: schema.TypeString, Optional: true, }, - "token": &schema.Schema{ + "token": { Type: schema.TypeString, Optional: true, Sensitive: true, diff --git a/databricks/resource_databricks_instance_pool.go b/databricks/resource_databricks_instance_pool.go index 822e33a21..3374d379b 100644 --- a/databricks/resource_databricks_instance_pool.go +++ b/databricks/resource_databricks_instance_pool.go @@ -23,23 +23,23 @@ func resourceInstancePool() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "instance_pool_name": &schema.Schema{ + "instance_pool_name": { Type: schema.TypeString, Required: true, }, - "min_idle_instances": &schema.Schema{ + "min_idle_instances": { Type: schema.TypeInt, Required: true, }, - "max_capacity": &schema.Schema{ + "max_capacity": { Type: schema.TypeInt, Required: true, }, - "idle_instance_autotermination_minutes": &schema.Schema{ + "idle_instance_autotermination_minutes": { Type: schema.TypeInt, Required: true, }, - "aws_attributes": &schema.Schema{ + "aws_attributes": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -66,27 +66,27 @@ func resourceInstancePool() *schema.Resource { }, }, }, - "node_type_id": &schema.Schema{ + "node_type_id": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "default_tags": &schema.Schema{ + "default_tags": { Type: schema.TypeMap, Computed: true, }, - "custom_tags": &schema.Schema{ + "custom_tags": { Type: schema.TypeMap, Optional: true, ForceNew: true, }, - "enable_elastic_disk": &schema.Schema{ + "enable_elastic_disk": { Type: schema.TypeBool, Optional: true, ForceNew: true, Default: true, }, - "disk_spec": &schema.Schema{ + "disk_spec": { Type: schema.TypeList, MaxItems: 1, Optional: true, @@ -128,7 +128,7 @@ func 
resourceInstancePool() *schema.Resource { }, }, }, - "preloaded_spark_versions": &schema.Schema{ + "preloaded_spark_versions": { Type: schema.TypeList, Optional: true, ForceNew: true, @@ -137,7 +137,7 @@ func resourceInstancePool() *schema.Resource { }, }, //TODO: Determine what this does from a state management perspective - "state": &schema.Schema{ + "state": { Type: schema.TypeString, Optional: true, Computed: true, diff --git a/databricks/resource_databricks_instance_profile.go b/databricks/resource_databricks_instance_profile.go index 6cae646a6..0cc0578d9 100644 --- a/databricks/resource_databricks_instance_profile.go +++ b/databricks/resource_databricks_instance_profile.go @@ -15,12 +15,12 @@ func resourceInstanceProfile() *schema.Resource { Delete: resourceInstanceProfileDelete, Schema: map[string]*schema.Schema{ - "instance_profile_arn": &schema.Schema{ + "instance_profile_arn": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "skip_validation": &schema.Schema{ + "skip_validation": { Type: schema.TypeBool, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_job.go b/databricks/resource_databricks_job.go index 35a8100d9..e197a0ab4 100644 --- a/databricks/resource_databricks_job.go +++ b/databricks/resource_databricks_job.go @@ -20,21 +20,21 @@ func resourceJob() *schema.Resource { Delete: resourceJobDelete, Schema: map[string]*schema.Schema{ - "existing_cluster_id": &schema.Schema{ + "existing_cluster_id": { Type: schema.TypeString, Optional: true, }, - "new_cluster": &schema.Schema{ + "new_cluster": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "num_workers": &schema.Schema{ + "num_workers": { Type: schema.TypeInt, Optional: true, }, - "autoscale": &schema.Schema{ + "autoscale": { Type: schema.TypeSet, Optional: true, MaxItems: 1, @@ -51,20 +51,20 @@ func resourceJob() *schema.Resource { }, }, }, - "cluster_name": &schema.Schema{ + "cluster_name": { Type: schema.TypeString, Optional: true, Computed: true, }, - "spark_version": &schema.Schema{ + "spark_version": { Type: schema.TypeString, Optional: true, }, - "spark_conf": &schema.Schema{ + "spark_conf": { Type: schema.TypeMap, Optional: true, }, - "aws_attributes": &schema.Schema{ + "aws_attributes": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -107,27 +107,27 @@ func resourceJob() *schema.Resource { }, }, }, - "driver_node_type_id": &schema.Schema{ + "driver_node_type_id": { Type: schema.TypeString, Optional: true, Computed: true, }, - "node_type_id": &schema.Schema{ + "node_type_id": { Type: schema.TypeString, Optional: true, }, - "ssh_public_keys": &schema.Schema{ + "ssh_public_keys": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, // TODO: Validate less than 10 values }, - "custom_tags": &schema.Schema{ + "custom_tags": { Type: schema.TypeMap, Optional: true, }, - "cluster_log_conf": &schema.Schema{ + "cluster_log_conf": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -196,7 +196,7 @@ func resourceJob() *schema.Resource { }, }, }, - "init_scripts": &schema.Schema{ + "init_scripts": { Type: schema.TypeList, Optional: true, MaxItems: 10, @@ -257,7 +257,7 @@ func resourceJob() *schema.Resource { }, }, //TODO: Docker does not seem to be supported by jobs - "docker_image": &schema.Schema{ + "docker_image": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -288,51 +288,51 @@ func resourceJob() *schema.Resource { }, }, }, - 
"spark_env_vars": &schema.Schema{ + "spark_env_vars": { Type: schema.TypeMap, Optional: true, }, //TODO: This should probably be removed jobs dont auto terminate - "autotermination_minutes": &schema.Schema{ + "autotermination_minutes": { Type: schema.TypeInt, Optional: true, Computed: true, }, - "enable_elastic_disk": &schema.Schema{ + "enable_elastic_disk": { Type: schema.TypeBool, Optional: true, Computed: true, }, - "instance_pool_id": &schema.Schema{ + "instance_pool_id": { Type: schema.TypeString, Optional: true, }, }, }, }, - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Optional: true, }, - "library_jar": &schema.Schema{ + "library_jar": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "library_egg": &schema.Schema{ + "library_egg": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "library_whl": &schema.Schema{ + "library_whl": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "library_pypi": &schema.Schema{ + "library_pypi": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ @@ -348,7 +348,7 @@ func resourceJob() *schema.Resource { }, }, }, - "library_maven": &schema.Schema{ + "library_maven": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ @@ -369,7 +369,7 @@ func resourceJob() *schema.Resource { }, }, }, - "library_cran": &schema.Schema{ + "library_cran": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ @@ -385,72 +385,72 @@ func resourceJob() *schema.Resource { }, }, }, - "notebook_path": &schema.Schema{ + "notebook_path": { Type: schema.TypeString, Optional: true, AtLeastOneOf: []string{"jar_main_class_name", "spark_submit_parameters", "python_file"}, ConflictsWith: []string{"jar_main_class_name", "spark_submit_parameters", "python_file"}, }, - "notebook_base_parameters": &schema.Schema{ + "notebook_base_parameters": { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "jar_uri": &schema.Schema{ + "jar_uri": { Type: schema.TypeString, Optional: true, }, - "jar_main_class_name": &schema.Schema{ + "jar_main_class_name": { Type: schema.TypeString, Optional: true, AtLeastOneOf: []string{"python_file", "notebook_path", "spark_submit_parameters"}, ConflictsWith: []string{"python_file", "notebook_path", "spark_submit_parameters"}, }, - "jar_parameters": &schema.Schema{ + "jar_parameters": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "python_file": &schema.Schema{ + "python_file": { Type: schema.TypeString, Optional: true, AtLeastOneOf: []string{"jar_main_class_name", "notebook_path", "spark_submit_parameters"}, ConflictsWith: []string{"jar_main_class_name", "notebook_path", "spark_submit_parameters"}, }, - "python_parameters": &schema.Schema{ + "python_parameters": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "spark_submit_parameters": &schema.Schema{ + "spark_submit_parameters": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, AtLeastOneOf: []string{"jar_main_class_name", "notebook_path", "python_file"}, ConflictsWith: []string{"jar_main_class_name", "notebook_path", "python_file"}, }, - "email_notifications": &schema.Schema{ + "email_notifications": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - 
"on_start": &schema.Schema{ + "on_start": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "on_success": &schema.Schema{ + "on_success": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "on_failure": &schema.Schema{ + "on_failure": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "no_alert_for_skipped_runs": &schema.Schema{ + "no_alert_for_skipped_runs": { Type: schema.TypeBool, Optional: true, }, @@ -458,23 +458,23 @@ func resourceJob() *schema.Resource { }, MaxItems: 1, }, - "timeout_seconds": &schema.Schema{ + "timeout_seconds": { Type: schema.TypeInt, Optional: true, }, - "max_retries": &schema.Schema{ + "max_retries": { Type: schema.TypeInt, Optional: true, }, - "min_retry_interval_millis": &schema.Schema{ + "min_retry_interval_millis": { Type: schema.TypeInt, Optional: true, }, - "retry_on_timeout": &schema.Schema{ + "retry_on_timeout": { Type: schema.TypeBool, Optional: true, }, - "schedule": &schema.Schema{ + "schedule": { Type: schema.TypeSet, Optional: true, MaxItems: 1, @@ -491,19 +491,19 @@ func resourceJob() *schema.Resource { }, }, }, - "max_concurrent_runs": &schema.Schema{ + "max_concurrent_runs": { Type: schema.TypeInt, Optional: true, }, - "job_id": &schema.Schema{ + "job_id": { Type: schema.TypeInt, Computed: true, }, - "creator_user_name": &schema.Schema{ + "creator_user_name": { Type: schema.TypeString, Computed: true, }, - "created_time": &schema.Schema{ + "created_time": { Type: schema.TypeInt, Computed: true, }, diff --git a/databricks/resource_databricks_mws_credentials.go b/databricks/resource_databricks_mws_credentials.go index 12656c5d9..0c4ba97c7 100644 --- a/databricks/resource_databricks_mws_credentials.go +++ b/databricks/resource_databricks_mws_credentials.go @@ -15,13 +15,13 @@ func resourceMWSCredentials() *schema.Resource { Delete: resourceMWSCredentialsDelete, Schema: map[string]*schema.Schema{ - "account_id": &schema.Schema{ + "account_id": { Type: schema.TypeString, Required: true, Sensitive: true, ForceNew: true, }, - "credentials_name": &schema.Schema{ + "credentials_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -39,7 +39,7 @@ func resourceMWSCredentials() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "credentials_id": &schema.Schema{ + "credentials_id": { Type: schema.TypeString, Computed: true, }, diff --git a/databricks/resource_databricks_mws_networks.go b/databricks/resource_databricks_mws_networks.go index c1fa514c6..fcb479c13 100644 --- a/databricks/resource_databricks_mws_networks.go +++ b/databricks/resource_databricks_mws_networks.go @@ -17,7 +17,7 @@ func resourceMWSNetworks() *schema.Resource { Delete: resourceMWSNetworkDelete, Schema: map[string]*schema.Schema{ - "account_id": &schema.Schema{ + "account_id": { Type: schema.TypeString, Required: true, Sensitive: true, diff --git a/databricks/resource_databricks_mws_storage_configurations.go b/databricks/resource_databricks_mws_storage_configurations.go index 9df0394c6..177cf8267 100644 --- a/databricks/resource_databricks_mws_storage_configurations.go +++ b/databricks/resource_databricks_mws_storage_configurations.go @@ -14,13 +14,13 @@ func resourceMWSStorageConfigurations() *schema.Resource { Delete: resourceMWSStorageConfigurationsDelete, Schema: map[string]*schema.Schema{ - "account_id": &schema.Schema{ + "account_id": { Type: schema.TypeString, Required: true, Sensitive: true, ForceNew: 
true, }, - "storage_configuration_name": &schema.Schema{ + "storage_configuration_name": { Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_mws_workspaces.go b/databricks/resource_databricks_mws_workspaces.go index dcadd056c..08affe4d6 100644 --- a/databricks/resource_databricks_mws_workspaces.go +++ b/databricks/resource_databricks_mws_workspaces.go @@ -23,13 +23,13 @@ func resourceMWSWorkspaces() *schema.Resource { Delete: resourceMWSWorkspacesDelete, Schema: map[string]*schema.Schema{ - "account_id": &schema.Schema{ + "account_id": { Type: schema.TypeString, Sensitive: true, Required: true, ForceNew: true, }, - "workspace_name": &schema.Schema{ + "workspace_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -87,19 +87,19 @@ func resourceMWSWorkspaces() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "workspace_status_message": &schema.Schema{ + "workspace_status_message": { Type: schema.TypeString, Computed: true, }, - "creation_time": &schema.Schema{ + "creation_time": { Type: schema.TypeInt, Computed: true, }, - "workspace_id": &schema.Schema{ + "workspace_id": { Type: schema.TypeInt, Computed: true, }, - "workspace_url": &schema.Schema{ + "workspace_url": { Type: schema.TypeString, Computed: true, }, diff --git a/databricks/resource_databricks_notebook.go b/databricks/resource_databricks_notebook.go index 4aff8a8ba..13ed636c2 100644 --- a/databricks/resource_databricks_notebook.go +++ b/databricks/resource_databricks_notebook.go @@ -28,7 +28,7 @@ func resourceNotebook() *schema.Resource { Delete: resourceNotebookDelete, Schema: map[string]*schema.Schema{ - "content": &schema.Schema{ + "content": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -42,12 +42,12 @@ func resourceNotebook() *schema.Resource { }, ValidateFunc: validation.StringIsBase64, }, - "path": &schema.Schema{ + "path": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "language": &schema.Schema{ + "language": { Type: schema.TypeString, Optional: true, ForceNew: true, @@ -58,19 +58,19 @@ func resourceNotebook() *schema.Resource { string(model.SQL), }, false), }, - "overwrite": &schema.Schema{ + "overwrite": { Type: schema.TypeBool, Optional: true, Default: false, ForceNew: true, }, - "mkdirs": &schema.Schema{ + "mkdirs": { Type: schema.TypeBool, Optional: true, Default: true, ForceNew: true, }, - "format": &schema.Schema{ + "format": { Type: schema.TypeString, Optional: true, Default: string(model.Source), @@ -82,11 +82,11 @@ func resourceNotebook() *schema.Resource { string(model.HTML), }, false), }, - "object_type": &schema.Schema{ + "object_type": { Type: schema.TypeString, Computed: true, }, - "object_id": &schema.Schema{ + "object_id": { Type: schema.TypeInt, Computed: true, }, diff --git a/databricks/resource_databricks_scim_group.go b/databricks/resource_databricks_scim_group.go index 9ad506d82..e674529fc 100644 --- a/databricks/resource_databricks_scim_group.go +++ b/databricks/resource_databricks_scim_group.go @@ -17,11 +17,11 @@ func resourceScimGroup() *schema.Resource { Delete: resourceScimGroupDelete, Schema: map[string]*schema.Schema{ - "display_name": &schema.Schema{ + "display_name": { Type: schema.TypeString, Required: true, }, - "members": &schema.Schema{ + "members": { Type: schema.TypeSet, Optional: true, //Computed: true, @@ -29,7 +29,7 @@ func resourceScimGroup() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "roles": &schema.Schema{ + 
"roles": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -49,13 +49,13 @@ func resourceScimGroup() *schema.Resource { return false }, }, - "inherited_roles": &schema.Schema{ + "inherited_roles": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "entitlements": &schema.Schema{ + "entitlements": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/databricks/resource_databricks_scim_user.go b/databricks/resource_databricks_scim_user.go index 3d718153d..61e0687a9 100644 --- a/databricks/resource_databricks_scim_user.go +++ b/databricks/resource_databricks_scim_user.go @@ -19,41 +19,41 @@ func resourceScimUser() *schema.Resource { Delete: resourceScimUserDelete, Schema: map[string]*schema.Schema{ - "user_name": &schema.Schema{ + "user_name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "display_name": &schema.Schema{ + "display_name": { Type: schema.TypeString, Optional: true, }, - "roles": &schema.Schema{ + "roles": { Type: schema.TypeSet, Optional: true, ConfigMode: schema.SchemaConfigModeAttr, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "entitlements": &schema.Schema{ + "entitlements": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "inherited_roles": &schema.Schema{ + "inherited_roles": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "default_roles": &schema.Schema{ + "default_roles": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "set_admin": &schema.Schema{ + "set_admin": { Type: schema.TypeBool, Optional: true, Default: false, diff --git a/databricks/resource_databricks_secret.go b/databricks/resource_databricks_secret.go index ac6eb3243..44d2fbb2b 100644 --- a/databricks/resource_databricks_secret.go +++ b/databricks/resource_databricks_secret.go @@ -16,23 +16,23 @@ func resourceSecret() *schema.Resource { Delete: resourceSecretDelete, Schema: map[string]*schema.Schema{ - "string_value": &schema.Schema{ + "string_value": { Type: schema.TypeString, Required: true, ForceNew: true, Sensitive: true, }, - "scope": &schema.Schema{ + "scope": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "key": &schema.Schema{ + "key": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "last_updated_timestamp": &schema.Schema{ + "last_updated_timestamp": { Type: schema.TypeInt, Computed: true, }, diff --git a/databricks/resource_databricks_secret_acl.go b/databricks/resource_databricks_secret_acl.go index 47758497c..e2ffff41f 100644 --- a/databricks/resource_databricks_secret_acl.go +++ b/databricks/resource_databricks_secret_acl.go @@ -16,17 +16,17 @@ func resourceSecretACL() *schema.Resource { Delete: resourceSecretACLDelete, Schema: map[string]*schema.Schema{ - "scope": &schema.Schema{ + "scope": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "principal": &schema.Schema{ + "principal": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "permission": &schema.Schema{ + "permission": { Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/databricks/resource_databricks_secret_scope.go b/databricks/resource_databricks_secret_scope.go index a262edc67..4327f7b33 100644 --- a/databricks/resource_databricks_secret_scope.go +++ 
b/databricks/resource_databricks_secret_scope.go @@ -15,18 +15,18 @@ func resourceSecretScope() *schema.Resource { Delete: resourceSecretScopeDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "initial_manage_principal": &schema.Schema{ + "initial_manage_principal": { Type: schema.TypeString, Optional: true, ForceNew: true, Default: "users", }, - "backend_type": &schema.Schema{ + "backend_type": { Type: schema.TypeString, Computed: true, }, diff --git a/databricks/resource_databricks_token.go b/databricks/resource_databricks_token.go index 27ddd26e5..bb3fdc467 100644 --- a/databricks/resource_databricks_token.go +++ b/databricks/resource_databricks_token.go @@ -15,29 +15,29 @@ func resourceToken() *schema.Resource { Delete: resourceTokenDelete, Schema: map[string]*schema.Schema{ - "lifetime_seconds": &schema.Schema{ + "lifetime_seconds": { Type: schema.TypeInt, Optional: true, ForceNew: true, Default: 0, }, - "comment": &schema.Schema{ + "comment": { Type: schema.TypeString, Optional: true, ForceNew: true, Default: "", }, - "creation_time": &schema.Schema{ + "creation_time": { Type: schema.TypeInt, Optional: true, Computed: true, }, - "token_value": &schema.Schema{ + "token_value": { Type: schema.TypeString, Computed: true, Sensitive: true, }, - "expiry_time": &schema.Schema{ + "expiry_time": { Type: schema.TypeInt, Optional: true, Computed: true, From f57c17d3298bdaf62ff24789754467d20de93ded Mon Sep 17 00:00:00 2001 From: Serge Smertin Date: Mon, 15 Jun 2020 01:04:36 +0200 Subject: [PATCH 03/13] Made lint even more happy by adjusting naming conventions and having VSCode not complain at all --- databricks/azure_auth.go | 4 +- ...e_databricks_azure_adls_gen2_mount_test.go | 10 ++-- ...source_databricks_azure_blob_mount_test.go | 26 +++++----- .../resource_databricks_mws_credentials.go | 22 ++++----- ...rce_databricks_mws_credentials_mws_test.go | 28 +++++------ .../resource_databricks_mws_networks.go | 20 ++++---- ...source_databricks_mws_networks_mws_test.go | 26 +++++----- ...e_databricks_mws_storage_configurations.go | 20 ++++---- ...cks_mws_storage_configurations_mws_test.go | 26 +++++----- .../resource_databricks_mws_workspaces.go | 48 +++++++++---------- ...urce_databricks_mws_workspaces_mws_test.go | 22 ++++----- databricks/utils.go | 14 +++--- 12 files changed, 133 insertions(+), 133 deletions(-) diff --git a/databricks/azure_auth.go b/databricks/azure_auth.go index 4b8685f71..361ae80d1 100644 --- a/databricks/azure_auth.go +++ b/databricks/azure_auth.go @@ -87,10 +87,10 @@ func (a *AzureAuth) getWorkspaceID(config *service.DBApiClientConfig) error { "Authorization": "Bearer " + a.ManagementToken, } type apiVersion struct { - ApiVersion string `url:"api-version"` + APIVersion string `url:"api-version"` } uriPayload := apiVersion{ - ApiVersion: "2018-04-01", + APIVersion: "2018-04-01", } var responseMap map[string]interface{} resp, err := service.PerformQuery(config, http.MethodGet, url, "2.0", headers, false, true, uriPayload, nil) diff --git a/databricks/resource_databricks_azure_adls_gen2_mount_test.go b/databricks/resource_databricks_azure_adls_gen2_mount_test.go index e13df844a..109624158 100644 --- a/databricks/resource_databricks_azure_adls_gen2_mount_test.go +++ b/databricks/resource_databricks_azure_adls_gen2_mount_test.go @@ -14,7 +14,7 @@ import ( ) func TestAccAzureAdlsGen2Mount_correctly_mounts(t *testing.T) { - terraformToApply := testAccAzureAdlsGen2Mount_correctly_mounts() + 
terraformToApply := testAccAzureAdlsGen2MountCorrectlyMounts() resource.Test(t, resource.TestCase{ Providers: testAccProviders, @@ -27,7 +27,7 @@ func TestAccAzureAdlsGen2Mount_correctly_mounts(t *testing.T) { } func TestAccAzureAdlsGen2Mount_cluster_deleted_correctly_mounts(t *testing.T) { - terraformToApply := testAccAzureAdlsGen2Mount_correctly_mounts() + terraformToApply := testAccAzureAdlsGen2MountCorrectlyMounts() var cluster model.ClusterInfo resource.Test(t, resource.TestCase{ @@ -50,7 +50,7 @@ func TestAccAzureAdlsGen2Mount_cluster_deleted_correctly_mounts(t *testing.T) { } func TestAccAzureAdlsGen2Mount_capture_error(t *testing.T) { - terraformToApply := testAccAzureAdlsGen2Mount_capture_error() + terraformToApply := testAccAzureAdlsGen2MountCaptureError() resource.Test(t, resource.TestCase{ Providers: testAccProviders, @@ -65,7 +65,7 @@ func TestAccAzureAdlsGen2Mount_capture_error(t *testing.T) { }) } -func testAccAzureAdlsGen2Mount_correctly_mounts() string { +func testAccAzureAdlsGen2MountCorrectlyMounts() string { clientID := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") tenantID := os.Getenv("ARM_TENANT_ID") @@ -129,7 +129,7 @@ func testAccAzureAdlsGen2Mount_correctly_mounts() string { return definition } -func testAccAzureAdlsGen2Mount_capture_error() string { +func testAccAzureAdlsGen2MountCaptureError() string { clientID := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") tenantID := os.Getenv("ARM_TENANT_ID") diff --git a/databricks/resource_databricks_azure_blob_mount_test.go b/databricks/resource_databricks_azure_blob_mount_test.go index 6940569cd..026b23a60 100644 --- a/databricks/resource_databricks_azure_blob_mount_test.go +++ b/databricks/resource_databricks_azure_blob_mount_test.go @@ -13,7 +13,7 @@ import ( ) func TestAccAzureBlobMount_correctly_mounts(t *testing.T) { - terraformToApply := testAccAzureBlobMount_correctly_mounts() + terraformToApply := testAccAzureBlobMountCorrectlyMounts() var clusterInfo model.ClusterInfo var azureBlobMount AzureBlobMount @@ -23,8 +23,8 @@ func TestAccAzureBlobMount_correctly_mounts(t *testing.T) { { Config: terraformToApply, Check: resource.ComposeTestCheckFunc( - testAccAzureBlobMount_cluster_exists("databricks_cluster.cluster", &clusterInfo), - testAccAzureBlobMount_mount_exists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), + testAccAzureBlobMountClusterExists("databricks_cluster.cluster", &clusterInfo), + testAccAzureBlobMountMountExists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), ), }, { @@ -35,7 +35,7 @@ func TestAccAzureBlobMount_correctly_mounts(t *testing.T) { }, Config: terraformToApply, Check: resource.ComposeTestCheckFunc( - testAccAzureBlobMount_mount_exists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), + testAccAzureBlobMountMountExists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), ), }, }, @@ -43,7 +43,7 @@ func TestAccAzureBlobMount_correctly_mounts(t *testing.T) { } func TestAccAzureBlobMount_cluster_deleted_correctly_mounts(t *testing.T) { - terraformToApply := testAccAzureBlobMount_correctly_mounts() + terraformToApply := testAccAzureBlobMountCorrectlyMounts() var clusterInfo model.ClusterInfo var azureBlobMount AzureBlobMount @@ -53,8 +53,8 @@ func TestAccAzureBlobMount_cluster_deleted_correctly_mounts(t *testing.T) { { Config: terraformToApply, Check: resource.ComposeTestCheckFunc( - testAccAzureBlobMount_cluster_exists("databricks_cluster.cluster", &clusterInfo), 
- testAccAzureBlobMount_mount_exists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), + testAccAzureBlobMountClusterExists("databricks_cluster.cluster", &clusterInfo), + testAccAzureBlobMountMountExists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), ), }, { @@ -65,14 +65,14 @@ func TestAccAzureBlobMount_cluster_deleted_correctly_mounts(t *testing.T) { }, Config: terraformToApply, Check: resource.ComposeTestCheckFunc( - testAccAzureBlobMount_mount_exists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), + testAccAzureBlobMountMountExists("databricks_azure_blob_mount.mount", &azureBlobMount, &clusterInfo), ), }, }, }) } -func testAccAzureBlobMount_cluster_exists(n string, clusterInfo *model.ClusterInfo) resource.TestCheckFunc { +func testAccAzureBlobMountClusterExists(n string, clusterInfo *model.ClusterInfo) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -93,7 +93,7 @@ func testAccAzureBlobMount_cluster_exists(n string, clusterInfo *model.ClusterIn } } -func testAccAzureBlobMount_mount_exists(n string, azureBlobMount *AzureBlobMount, clusterInfo *model.ClusterInfo) resource.TestCheckFunc { +func testAccAzureBlobMountMountExists(n string, azureBlobMount *AzureBlobMount, clusterInfo *model.ClusterInfo) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -113,9 +113,9 @@ func testAccAzureBlobMount_mount_exists(n string, azureBlobMount *AzureBlobMount tokenSecretScope, tokenSecretKey) client := testAccProvider.Meta().(*service.DBApiClient) - cluster_id := clusterInfo.ClusterID + clusterID := clusterInfo.ClusterID - message, err := blobMount.Read(client, cluster_id) + message, err := blobMount.Read(client, clusterID) if err != nil { return fmt.Errorf("Error reading the mount %s: error %s", message, err) } @@ -126,7 +126,7 @@ func testAccAzureBlobMount_mount_exists(n string, azureBlobMount *AzureBlobMount } } -func testAccAzureBlobMount_correctly_mounts() string { +func testAccAzureBlobMountCorrectlyMounts() string { clientID := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") tenantID := os.Getenv("ARM_TENANT_ID") diff --git a/databricks/resource_databricks_mws_credentials.go b/databricks/resource_databricks_mws_credentials.go index 0c4ba97c7..648fe37cb 100644 --- a/databricks/resource_databricks_mws_credentials.go +++ b/databricks/resource_databricks_mws_credentials.go @@ -51,30 +51,30 @@ func resourceMWSCredentialsCreate(d *schema.ResourceData, m interface{}) error { client := m.(*service.DBApiClient) credentialsName := d.Get("credentials_name").(string) roleArn := d.Get("role_arn").(string) - mwsAcctId := d.Get("account_id").(string) - credentials, err := client.MWSCredentials().Create(mwsAcctId, credentialsName, roleArn) + mwsAcctID := d.Get("account_id").(string) + credentials, err := client.MWSCredentials().Create(mwsAcctID, credentialsName, roleArn) if err != nil { return err } - credentialsResourceId := PackagedMWSIds{ - MwsAcctId: mwsAcctId, - ResourceId: credentials.CredentialsID, + credentialsResourceID := PackagedMWSIds{ + MwsAcctID: mwsAcctID, + ResourceID: credentials.CredentialsID, } - d.SetId(packMWSAccountId(credentialsResourceId)) + d.SetId(packMWSAccountID(credentialsResourceID)) return resourceMWSCredentialsRead(d, m) } func resourceMWSCredentialsRead(d *schema.ResourceData, m interface{}) error { id := 
d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - credentials, err := client.MWSCredentials().Read(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + credentials, err := client.MWSCredentials().Read(packagedMwsID.MwsAcctID, packagedMwsID.ResourceID) if err != nil { if isMWSCredentialsMissing(err.Error()) { - log.Printf("Missing e2 credentials with id: %s.", packagedMwsId.ResourceId) + log.Printf("Missing e2 credentials with id: %s.", packagedMwsID.ResourceID) d.SetId("") return nil } @@ -106,11 +106,11 @@ func resourceMWSCredentialsRead(d *schema.ResourceData, m interface{}) error { func resourceMWSCredentialsDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - err = client.MWSCredentials().Delete(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + err = client.MWSCredentials().Delete(packagedMwsID.MwsAcctID, packagedMwsID.ResourceID) return err } diff --git a/databricks/resource_databricks_mws_credentials_mws_test.go b/databricks/resource_databricks_mws_credentials_mws_test.go index 3ba79f62d..17101e324 100644 --- a/databricks/resource_databricks_mws_credentials_mws_test.go +++ b/databricks/resource_databricks_mws_credentials_mws_test.go @@ -17,9 +17,9 @@ func TestAccMWSCredentials(t *testing.T) { // the acctest package includes many helpers such as RandStringFromCharSet // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsAcctID := os.Getenv("DATABRICKS_MWS_ACCT_ID") mwsHost := os.Getenv("DATABRICKS_MWS_HOST") - awsAcctId := "999999999999" + awsAcctID := "999999999999" credentialsName := "test-mws-credentials-tf" roleName := "terraform-creds-role" @@ -29,26 +29,26 @@ func TestAccMWSCredentials(t *testing.T) { Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName), + Config: testMWSCredentialsCreate(mwsAcctID, mwsHost, awsAcctID, roleName, credentialsName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSCredentialsResourceExists("databricks_mws_credentials.my_e2_credentials", &MWSCredentials, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "credentials_name", credentialsName), ), Destroy: false, }, { // use a dynamic configuration with the random name from above - Config: testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName), + Config: testMWSCredentialsCreate(mwsAcctID, mwsHost, awsAcctID, roleName, credentialsName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSCredentialsResourceExists("databricks_mws_credentials.my_e2_credentials", &MWSCredentials, t), // verify local values - 
resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "credentials_name", credentialsName), ), ExpectNonEmptyPlan: false, @@ -63,11 +63,11 @@ func TestAccMWSCredentials(t *testing.T) { } }, // use a dynamic configuration with the random name from above - Config: testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName), + Config: testMWSCredentialsCreate(mwsAcctID, mwsHost, awsAcctID, roleName, credentialsName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // verify local values - resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_credentials.my_e2_credentials", "credentials_name", credentialsName), ), Destroy: false, @@ -83,11 +83,11 @@ func testMWSCredentialsResourceDestroy(s *terraform.State) error { if rs.Type != "databricks_mws_credentials" { continue } - packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - _, err = client.MWSCredentials().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + _, err = client.MWSCredentials().Read(packagedMWSIds.MwsAcctID, packagedMWSIds.ResourceID) if err != nil { return nil } @@ -107,11 +107,11 @@ func testMWSCredentialsResourceExists(n string, mwsCreds *model.MWSCredentials, // retrieve the configured client from the test setup conn := getMWSClient() - packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - resp, err := conn.MWSCredentials().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + resp, err := conn.MWSCredentials().Read(packagedMWSIds.MwsAcctID, packagedMWSIds.ResourceID) if err != nil { return err } @@ -122,7 +122,7 @@ func testMWSCredentialsResourceExists(n string, mwsCreds *model.MWSCredentials, } } -func testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentialsName string) string { +func testMWSCredentialsCreate(mwsAcctID, mwsHost, awsAcctID, roleName, credentialsName string) string { return fmt.Sprintf(` provider "databricks" { host = "%s" @@ -133,5 +133,5 @@ func testMWSCredentialsCreate(mwsAcctId, mwsHost, awsAcctId, roleName, credentia credentials_name = "%s" role_arn = "arn:aws:iam::%s:role/%s" } - `, mwsHost, mwsAcctId, credentialsName, awsAcctId, roleName) + `, mwsHost, mwsAcctID, credentialsName, awsAcctID, roleName) } diff --git a/databricks/resource_databricks_mws_networks.go b/databricks/resource_databricks_mws_networks.go index fcb479c13..70acdb83e 100644 --- a/databricks/resource_databricks_mws_networks.go +++ b/databricks/resource_databricks_mws_networks.go @@ -89,31 +89,31 @@ func resourceMWSNetworks() *schema.Resource { func resourceMWSNetworkCreate(d *schema.ResourceData, m interface{}) error { client := m.(*service.DBApiClient) networkName := d.Get("network_name").(string) - mwsAcctId := d.Get("account_id").(string) + mwsAcctID := d.Get("account_id").(string) VPCID := d.Get("vpc_id").(string) subnetIds := convertListInterfaceToString(d.Get("subnet_ids").(*schema.Set).List()) 
securityGroupIds := convertListInterfaceToString(d.Get("security_group_ids").(*schema.Set).List()) - network, err := client.MWSNetworks().Create(mwsAcctId, networkName, VPCID, subnetIds, securityGroupIds) + network, err := client.MWSNetworks().Create(mwsAcctID, networkName, VPCID, subnetIds, securityGroupIds) if err != nil { return err } - networksResourceId := PackagedMWSIds{ - MwsAcctId: mwsAcctId, - ResourceId: network.NetworkID, + networksResourceID := PackagedMWSIds{ + MwsAcctID: mwsAcctID, + ResourceID: network.NetworkID, } - d.SetId(packMWSAccountId(networksResourceId)) + d.SetId(packMWSAccountID(networksResourceID)) return resourceMWSNetworkRead(d, m) } func resourceMWSNetworkRead(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - network, err := client.MWSNetworks().Read(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + network, err := client.MWSNetworks().Read(packagedMwsID.MwsAcctID, packagedMwsID.ResourceID) if err != nil { if isMWSNetworkMissing(err.Error()) { log.Printf("Missing e2 network with id: %s.", id) @@ -173,11 +173,11 @@ func resourceMWSNetworkRead(d *schema.ResourceData, m interface{}) error { func resourceMWSNetworkDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - err = client.MWSNetworks().Delete(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + err = client.MWSNetworks().Delete(packagedMwsID.MwsAcctID, packagedMwsID.ResourceID) return err } diff --git a/databricks/resource_databricks_mws_networks_mws_test.go b/databricks/resource_databricks_mws_networks_mws_test.go index 66ca8ffaa..2236f10d9 100644 --- a/databricks/resource_databricks_mws_networks_mws_test.go +++ b/databricks/resource_databricks_mws_networks_mws_test.go @@ -17,7 +17,7 @@ func TestAccMWSNetworks(t *testing.T) { // the acctest package includes many helpers such as RandStringFromCharSet // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsAcctID := os.Getenv("DATABRICKS_MWS_ACCT_ID") mwsHost := os.Getenv("DATABRICKS_MWS_HOST") networkName := "test-mws-network-tf" @@ -33,26 +33,26 @@ func TestAccMWSNetworks(t *testing.T) { Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), + Config: testMWSNetworkCreate(mwsAcctID, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSNetworkResourceExists("databricks_mws_networks.my_network", &MWSNetwork, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "network_name", networkName), ), Destroy: false, }, { // use a dynamic configuration with the random name from above - Config: testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpc, 
subnet1, subnet2, sg1, sg2), + Config: testMWSNetworkCreate(mwsAcctID, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSNetworkResourceExists("databricks_mws_networks.my_network", &MWSNetwork, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "network_name", networkName), ), ExpectNonEmptyPlan: false, @@ -67,13 +67,13 @@ func TestAccMWSNetworks(t *testing.T) { } }, // use a dynamic configuration with the random name from above - Config: testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), + Config: testMWSNetworkCreate(mwsAcctID, mwsHost, networkName, vpc, subnet1, subnet2, sg1, sg2), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSNetworkResourceExists("databricks_mws_networks.my_network", &MWSNetwork, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_networks.my_network", "network_name", networkName), ), ExpectNonEmptyPlan: false, @@ -90,11 +90,11 @@ func testMWSNetworkResourceDestroy(s *terraform.State) error { if rs.Type != "databricks_mws_storage_configurations" { continue } - packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - _, err = client.MWSNetworks().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + _, err = client.MWSNetworks().Read(packagedMWSIds.MwsAcctID, packagedMWSIds.ResourceID) if err != nil { return nil } @@ -114,11 +114,11 @@ func testMWSNetworkResourceExists(n string, mwsCreds *model.MWSNetwork, t *testi // retrieve the configured client from the test setup conn := getMWSClient() - packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - resp, err := conn.MWSNetworks().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + resp, err := conn.MWSNetworks().Read(packagedMWSIds.MwsAcctID, packagedMWSIds.ResourceID) if err != nil { return err } @@ -129,7 +129,7 @@ func testMWSNetworkResourceExists(n string, mwsCreds *model.MWSNetwork, t *testi } } -func testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpcId, subnetId1, subnetId2, sgId1, sgId2 string) string { +func testMWSNetworkCreate(mwsAcctID, mwsHost, networkName, vpcID, subnetID1, subnetID2, sgID1, sgID2 string) string { return fmt.Sprintf(` provider "databricks" { host = "%s" @@ -149,5 +149,5 @@ func testMWSNetworkCreate(mwsAcctId, mwsHost, networkName, vpcId, subnetId1, sub ] } - `, mwsHost, mwsAcctId, networkName, vpcId, subnetId1, subnetId2, sgId1, sgId2) + `, mwsHost, mwsAcctID, networkName, vpcID, subnetID1, subnetID2, sgID1, sgID2) } diff --git a/databricks/resource_databricks_mws_storage_configurations.go b/databricks/resource_databricks_mws_storage_configurations.go index 177cf8267..a6c09168f 100644 --- 
a/databricks/resource_databricks_mws_storage_configurations.go +++ b/databricks/resource_databricks_mws_storage_configurations.go @@ -46,27 +46,27 @@ func resourceMWSStorageConfigurationsCreate(d *schema.ResourceData, m interface{ client := m.(*service.DBApiClient) storageConfigurationName := d.Get("storage_configuration_name").(string) bucketName := d.Get("bucket_name").(string) - mwsAcctId := d.Get("account_id").(string) - storageConfiguration, err := client.MWSStorageConfigurations().Create(mwsAcctId, storageConfigurationName, bucketName) + mwsAcctID := d.Get("account_id").(string) + storageConfiguration, err := client.MWSStorageConfigurations().Create(mwsAcctID, storageConfigurationName, bucketName) if err != nil { return err } - storageConfigurationResourceId := PackagedMWSIds{ - MwsAcctId: mwsAcctId, - ResourceId: storageConfiguration.StorageConfigurationID, + storageConfigurationResourceID := PackagedMWSIds{ + MwsAcctID: mwsAcctID, + ResourceID: storageConfiguration.StorageConfigurationID, } - d.SetId(packMWSAccountId(storageConfigurationResourceId)) + d.SetId(packMWSAccountID(storageConfigurationResourceID)) return resourceMWSStorageConfigurationsRead(d, m) } func resourceMWSStorageConfigurationsRead(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - storageConifiguration, err := client.MWSStorageConfigurations().Read(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + storageConifiguration, err := client.MWSStorageConfigurations().Read(packagedMwsID.MwsAcctID, packagedMwsID.ResourceID) if err != nil { if isE2StorageConfigurationsMissing(err.Error()) { log.Printf("Missing mws storage configurations with id: %s.", id) @@ -102,11 +102,11 @@ func resourceMWSStorageConfigurationsRead(d *schema.ResourceData, m interface{}) func resourceMWSStorageConfigurationsDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - err = client.MWSStorageConfigurations().Delete(packagedMwsId.MwsAcctId, packagedMwsId.ResourceId) + err = client.MWSStorageConfigurations().Delete(packagedMwsID.MwsAcctID, packagedMwsID.ResourceID) return err } diff --git a/databricks/resource_databricks_mws_storage_configurations_mws_test.go b/databricks/resource_databricks_mws_storage_configurations_mws_test.go index 35950da6d..41674cace 100644 --- a/databricks/resource_databricks_mws_storage_configurations_mws_test.go +++ b/databricks/resource_databricks_mws_storage_configurations_mws_test.go @@ -17,7 +17,7 @@ func TestAccMWSStorageConfigurations(t *testing.T) { // the acctest package includes many helpers such as RandStringFromCharSet // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsAcctID := os.Getenv("DATABRICKS_MWS_ACCT_ID") mwsHost := os.Getenv("DATABRICKS_MWS_HOST") storageConfigName := "test-mws-storage-configurations-tf" bucketName := "terraform-test-bucket" @@ -28,13 +28,13 @@ func TestAccMWSStorageConfigurations(t *testing.T) { Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName), + Config: 
testMWSStorageConfigurationsCreate(mwsAcctID, mwsHost, storageConfigName, bucketName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSStorageConfigurationsResourceExists("databricks_mws_storage_configurations.my_mws_storage_configurations", &MWSStorageConfigurations, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "storage_configuration_name", storageConfigName), resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "bucket_name", bucketName), ), @@ -42,13 +42,13 @@ func TestAccMWSStorageConfigurations(t *testing.T) { }, { // use a dynamic configuration with the random name from above - Config: testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName), + Config: testMWSStorageConfigurationsCreate(mwsAcctID, mwsHost, storageConfigName, bucketName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSStorageConfigurationsResourceExists("databricks_mws_storage_configurations.my_mws_storage_configurations", &MWSStorageConfigurations, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "storage_configuration_name", storageConfigName), resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "bucket_name", bucketName), ), @@ -64,13 +64,13 @@ func TestAccMWSStorageConfigurations(t *testing.T) { } }, // use a dynamic configuration with the random name from above - Config: testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName), + Config: testMWSStorageConfigurationsCreate(mwsAcctID, mwsHost, storageConfigName, bucketName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object testMWSStorageConfigurationsResourceExists("databricks_mws_storage_configurations.my_mws_storage_configurations", &MWSStorageConfigurations, t), // verify local values - resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctId), + resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "account_id", mwsAcctID), resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "storage_configuration_name", storageConfigName), resource.TestCheckResourceAttr("databricks_mws_storage_configurations.my_mws_storage_configurations", "bucket_name", bucketName), ), @@ -88,11 +88,11 @@ func testMWSStorageConfigurationsResourceDestroy(s *terraform.State) error { if rs.Type != "databricks_mws_storage_configurations" { continue } - packagedMWSIds, err := 
unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - _, err = client.MWSStorageConfigurations().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + _, err = client.MWSStorageConfigurations().Read(packagedMWSIds.MwsAcctID, packagedMWSIds.ResourceID) if err != nil { return nil } @@ -112,11 +112,11 @@ func testMWSStorageConfigurationsResourceExists(n string, mwsCreds *model.MWSSto // retrieve the configured client from the test setup conn := getMWSClient() - packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - resp, err := conn.MWSStorageConfigurations().Read(packagedMWSIds.MwsAcctId, packagedMWSIds.ResourceId) + resp, err := conn.MWSStorageConfigurations().Read(packagedMWSIds.MwsAcctID, packagedMWSIds.ResourceID) if err != nil { return err } @@ -127,7 +127,7 @@ func testMWSStorageConfigurationsResourceExists(n string, mwsCreds *model.MWSSto } } -func testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, bucketName string) string { +func testMWSStorageConfigurationsCreate(mwsAcctID, mwsHost, storageConfigName, bucketName string) string { return fmt.Sprintf(` provider "databricks" { host = "%s" @@ -138,5 +138,5 @@ func testMWSStorageConfigurationsCreate(mwsAcctId, mwsHost, storageConfigName, b storage_configuration_name = "%s" bucket_name = "%s" } - `, mwsHost, mwsAcctId, storageConfigName, bucketName) + `, mwsHost, mwsAcctID, storageConfigName, bucketName) } diff --git a/databricks/resource_databricks_mws_workspaces.go b/databricks/resource_databricks_mws_workspaces.go index 08affe4d6..0345ff21d 100644 --- a/databricks/resource_databricks_mws_workspaces.go +++ b/databricks/resource_databricks_mws_workspaces.go @@ -125,35 +125,35 @@ func resourceMWSWorkspaces() *schema.Resource { func resourceMWSWorkspacesCreate(d *schema.ResourceData, m interface{}) error { client := m.(*service.DBApiClient) - mwsAcctId := d.Get("account_id").(string) + mwsAcctID := d.Get("account_id").(string) workspaceName := d.Get("workspace_name").(string) deploymentName := d.Get("deployment_name").(string) awsRegion := d.Get("aws_region").(string) credentialsID := d.Get("credentials_id").(string) storageConfigurationID := d.Get("storage_configuration_id").(string) networkID := d.Get("network_id").(string) - customerManagedKeyId := d.Get("customer_managed_key_id").(string) - isNoPublicIpEnabled := d.Get("is_no_public_ip_enabled").(bool) + customerManagedKeyID := d.Get("customer_managed_key_id").(string) + isNoPublicIPEnabled := d.Get("is_no_public_ip_enabled").(bool) var workspace model.MWSWorkspace var err error - workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyId, isNoPublicIpEnabled) + workspace, err = client.MWSWorkspaces().Create(mwsAcctID, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyID, isNoPublicIPEnabled) // Sometimes workspaces api is buggy if err != nil { time.Sleep(15 * time.Second) - workspace, err = client.MWSWorkspaces().Create(mwsAcctId, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyId, isNoPublicIpEnabled) + workspace, err = client.MWSWorkspaces().Create(mwsAcctID, workspaceName, deploymentName, awsRegion, credentialsID, storageConfigurationID, networkID, 
customerManagedKeyID, isNoPublicIPEnabled) if err != nil { return err } } - workspaceResourceId := PackagedMWSIds{ - MwsAcctId: mwsAcctId, - ResourceId: strconv.Itoa(int(workspace.WorkspaceID)), + workspaceResourceID := PackagedMWSIds{ + MwsAcctID: mwsAcctID, + ResourceID: strconv.Itoa(int(workspace.WorkspaceID)), } - d.SetId(packMWSAccountId(workspaceResourceId)) - err = client.MWSWorkspaces().WaitForWorkspaceRunning(mwsAcctId, workspace.WorkspaceID, 10, 180) + d.SetId(packMWSAccountID(workspaceResourceID)) + err = client.MWSWorkspaces().WaitForWorkspaceRunning(mwsAcctID, workspace.WorkspaceID, 10, 180) if err != nil { if !reflect.ValueOf(networkID).IsZero() { - network, networkReadErr := client.MWSNetworks().Read(mwsAcctId, networkID) + network, networkReadErr := client.MWSNetworks().Read(mwsAcctID, networkID) if networkReadErr != nil { return fmt.Errorf("Workspace failed to create: %v, network read failure error: %v", err, networkReadErr) } @@ -167,16 +167,16 @@ func resourceMWSWorkspacesCreate(d *schema.ResourceData, m interface{}) error { func resourceMWSWorkspacesRead(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - idInt64, err := strconv.ParseInt(packagedMwsId.ResourceId, 10, 64) + idInt64, err := strconv.ParseInt(packagedMwsID.ResourceID, 10, 64) if err != nil { return err } - workspace, err := client.MWSWorkspaces().Read(packagedMwsId.MwsAcctId, idInt64) + workspace, err := client.MWSWorkspaces().Read(packagedMwsID.MwsAcctID, idInt64) if err != nil { if isMWSWorkspaceMissing(err.Error(), id) { log.Printf("Missing e2 workspace with id: %s.", id) @@ -186,7 +186,7 @@ func resourceMWSWorkspacesRead(d *schema.ResourceData, m interface{}) error { return err } - err = client.MWSWorkspaces().WaitForWorkspaceRunning(packagedMwsId.MwsAcctId, idInt64, 10, 180) + err = client.MWSWorkspaces().WaitForWorkspaceRunning(packagedMwsID.MwsAcctID, idInt64, 10, 180) if err != nil { log.Println("WORKSPACE IS NOT RUNNING") err2 := d.Set("verify_workspace_runnning", false) @@ -270,11 +270,11 @@ func resourceMWSWorkspacesRead(d *schema.ResourceData, m interface{}) error { func resourceMWSWorkspacePatch(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - idInt64, err := strconv.ParseInt(packagedMwsId.ResourceId, 10, 64) + idInt64, err := strconv.ParseInt(packagedMwsID.ResourceID, 10, 64) if err != nil { return err } @@ -282,14 +282,14 @@ func resourceMWSWorkspacePatch(d *schema.ResourceData, m interface{}) error { credentialsID := d.Get("credentials_id").(string) storageConfigurationID := d.Get("storage_configuration_id").(string) networkID := d.Get("network_id").(string) - customerManagedKeyId := d.Get("customer_managed_key_id").(string) - isNoPublicIpEnabled := d.Get("is_no_public_ip_enabled").(bool) + customerManagedKeyID := d.Get("customer_managed_key_id").(string) + isNoPublicIPEnabled := d.Get("is_no_public_ip_enabled").(bool) - err = client.MWSWorkspaces().Patch(packagedMwsId.MwsAcctId, idInt64, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyId, isNoPublicIpEnabled) + err = client.MWSWorkspaces().Patch(packagedMwsID.MwsAcctID, idInt64, awsRegion, credentialsID, storageConfigurationID, networkID, customerManagedKeyID, 
isNoPublicIPEnabled) if err != nil { return err } - err = client.MWSWorkspaces().WaitForWorkspaceRunning(packagedMwsId.MwsAcctId, idInt64, 10, 180) + err = client.MWSWorkspaces().WaitForWorkspaceRunning(packagedMwsID.MwsAcctID, idInt64, 10, 180) if err != nil { return err } @@ -299,15 +299,15 @@ func resourceMWSWorkspacePatch(d *schema.ResourceData, m interface{}) error { func resourceMWSWorkspacesDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) - packagedMwsId, err := unpackMWSAccountId(id) + packagedMwsID, err := unpackMWSAccountID(id) if err != nil { return err } - idInt64, err := strconv.ParseInt(packagedMwsId.ResourceId, 10, 64) + idInt64, err := strconv.ParseInt(packagedMwsID.ResourceID, 10, 64) if err != nil { return err } - err = client.MWSWorkspaces().Delete(packagedMwsId.MwsAcctId, idInt64) + err = client.MWSWorkspaces().Delete(packagedMwsID.MwsAcctID, idInt64) return err } diff --git a/databricks/resource_databricks_mws_workspaces_mws_test.go b/databricks/resource_databricks_mws_workspaces_mws_test.go index f5150fedf..3e79d6a5c 100644 --- a/databricks/resource_databricks_mws_workspaces_mws_test.go +++ b/databricks/resource_databricks_mws_workspaces_mws_test.go @@ -18,7 +18,7 @@ func TestAccMWSWorkspaces(t *testing.T) { // the acctest package includes many helpers such as RandStringFromCharSet // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - mwsAcctId := os.Getenv("DATABRICKS_MWS_ACCT_ID") + mwsAcctID := os.Getenv("DATABRICKS_MWS_ACCT_ID") mwsHost := os.Getenv("DATABRICKS_MWS_HOST") credentialsName := "tf-workspace-test-creds" roleArn := os.Getenv("TEST_MWS_CROSS_ACCT_ROLE") @@ -28,7 +28,7 @@ func TestAccMWSWorkspaces(t *testing.T) { deploymentName := fmt.Sprintf("%s-dep", workspaceName) awsRegion := os.Getenv("DATABRICKS_MWS_AWS_REGION") networkName := "tf-workspace-test-network" - vpcId := os.Getenv("TEST_MWS_VPC_ID") + vpcID := os.Getenv("TEST_MWS_VPC_ID") subnet1 := os.Getenv("TEST_MWS_SUBNET_1") subnet2 := os.Getenv("TEST_MWS_SUBNET_2") sg := os.Getenv("TEST_MWS_SG") @@ -39,8 +39,8 @@ func TestAccMWSWorkspaces(t *testing.T) { Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testMWSWorkspacesCreate(mwsAcctId, mwsHost, credentialsName, roleArn, storageConfigName, bucketName, - workspaceName, deploymentName, awsRegion, networkName, vpcId, subnet1, subnet2, sg), + Config: testMWSWorkspacesCreate(mwsAcctID, mwsHost, credentialsName, roleArn, storageConfigName, bucketName, + workspaceName, deploymentName, awsRegion, networkName, vpcID, subnet1, subnet2, sg), //// compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // verify that after creation the workspace_status is in a running state. 
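// A minimal sketch (hypothetical values) of the composite ID convention the MWS
// resources above rely on: packMWSAccountID and unpackMWSAccountID from utils.go
// join and split the account ID and the per-resource ID on a single "/".
//
//	ids := PackagedMWSIds{MwsAcctID: "abc-123", ResourceID: "456"}
//	combined := packMWSAccountID(ids)             // "abc-123/456"
//	unpacked, err := unpackMWSAccountID(combined)
//	// err is non-nil unless combined has exactly two "/"-separated parts;
//	// here unpacked.MwsAcctID == "abc-123" and unpacked.ResourceID == "456".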
@@ -59,15 +59,15 @@ func testMWSWorkspacesResourceDestroy(s *terraform.State) error { if rs.Type != "databricks_mws_workspaces" { continue } - packagedMWSIds, err := unpackMWSAccountId(rs.Primary.ID) + packagedMWSIds, err := unpackMWSAccountID(rs.Primary.ID) if err != nil { return err } - idInt64, err := strconv.ParseInt(packagedMWSIds.ResourceId, 10, 64) + idInt64, err := strconv.ParseInt(packagedMWSIds.ResourceID, 10, 64) if err != nil { return err } - _, err = client.MWSWorkspaces().Read(packagedMWSIds.MwsAcctId, idInt64) + _, err = client.MWSWorkspaces().Read(packagedMWSIds.MwsAcctID, idInt64) if err != nil { return nil } @@ -76,8 +76,8 @@ func testMWSWorkspacesResourceDestroy(s *terraform.State) error { return nil } -func testMWSWorkspacesCreate(mwsAcctId, mwsHost, credentialsName, roleArn, storageConfigName, bucketName, - workspaceName, deploymentName, awsRegion, networkName, vpcId, subnet1, subnet2, sg string) string { +func testMWSWorkspacesCreate(mwsAcctID, mwsHost, credentialsName, roleArn, storageConfigName, bucketName, + workspaceName, deploymentName, awsRegion, networkName, vpcID, subnet1, subnet2, sg string) string { return fmt.Sprintf(` provider "databricks" { host = "%[1]s" @@ -116,6 +116,6 @@ func testMWSWorkspacesCreate(mwsAcctId, mwsHost, credentialsName, roleArn, stora network_id = databricks_mws_networks.my_network.network_id verify_workspace_runnning = true } - `, mwsHost, mwsAcctId, credentialsName, roleArn, storageConfigName, bucketName, - workspaceName, deploymentName, awsRegion, networkName, vpcId, subnet1, subnet2, sg) + `, mwsHost, mwsAcctID, credentialsName, roleArn, storageConfigName, bucketName, + workspaceName, deploymentName, awsRegion, networkName, vpcID, subnet1, subnet2, sg) } diff --git a/databricks/utils.go b/databricks/utils.go index a4a53c55a..936bdda78 100644 --- a/databricks/utils.go +++ b/databricks/utils.go @@ -77,24 +77,24 @@ func isClusterMissing(errorMsg, resourceID string) bool { // PackagedMWSIds is a struct that contains both the MWS acct id and the ResourceId (resources are networks, creds, etc.) 
type PackagedMWSIds struct { - MwsAcctId string - ResourceId string + MwsAcctID string + ResourceID string } // Helps package up MWSAccountId with another id such as credentials id or network id // uses format mwsAcctId/otherId -func packMWSAccountId(idsToPackage PackagedMWSIds) string { - return fmt.Sprintf("%s/%s", idsToPackage.MwsAcctId, idsToPackage.ResourceId) +func packMWSAccountID(idsToPackage PackagedMWSIds) string { + return fmt.Sprintf("%s/%s", idsToPackage.MwsAcctID, idsToPackage.ResourceID) } // Helps unpackage MWSAccountId from another id such as credentials id or network id -func unpackMWSAccountId(combined string) (PackagedMWSIds, error) { +func unpackMWSAccountID(combined string) (PackagedMWSIds, error) { var packagedMWSIds PackagedMWSIds parts := strings.Split(combined, "/") if len(parts) != 2 { return packagedMWSIds, fmt.Errorf("unpacked account has more than or less than two parts, combined id: %s", combined) } - packagedMWSIds.MwsAcctId = parts[0] - packagedMWSIds.ResourceId = parts[1] + packagedMWSIds.MwsAcctID = parts[0] + packagedMWSIds.ResourceID = parts[1] return packagedMWSIds, nil } From 67edef8ce4726d65a41051a9aaf3a8b136469bbf Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 06:18:07 -0400 Subject: [PATCH 04/13] added databricks_group, databricks_group_member & databricks_group_role to manage the scim resources separately --- databricks/provider.go | 4 + databricks/resource_databricks_group.go | 152 ++++++++++++++ .../resource_databricks_group_aws_test.go | 162 +++++++++++++++ .../resource_databricks_group_azure_test.go | 158 ++++++++++++++ .../resource_databricks_group_member.go | 126 +++++++++++ ...source_databricks_group_member_aws_test.go | 196 ++++++++++++++++++ ...urce_databricks_group_member_azure_test.go | 196 ++++++++++++++++++ databricks/resource_databricks_group_role.go | 123 +++++++++++ ...resource_databricks_group_role_aws_test.go | 134 ++++++++++++ databricks/utils.go | 22 ++ databricks/utils_test.go | 19 ++ go.mod | 1 + go.sum | 6 + 13 files changed, 1299 insertions(+) create mode 100644 databricks/resource_databricks_group.go create mode 100644 databricks/resource_databricks_group_aws_test.go create mode 100644 databricks/resource_databricks_group_azure_test.go create mode 100644 databricks/resource_databricks_group_member.go create mode 100644 databricks/resource_databricks_group_member_aws_test.go create mode 100644 databricks/resource_databricks_group_member_azure_test.go create mode 100644 databricks/resource_databricks_group_role.go create mode 100644 databricks/resource_databricks_group_role_aws_test.go diff --git a/databricks/provider.go b/databricks/provider.go index 5a9d42024..f4ab5c66d 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -33,6 +33,10 @@ func Provider(version string) terraform.ResourceProvider { "databricks_instance_pool": resourceInstancePool(), "databricks_scim_user": resourceScimUser(), "databricks_scim_group": resourceScimGroup(), + // Scim Group is split into multiple components for flexibility to pick and choose + "databricks_group": resourceGroup(), + "databricks_group_role": resourceGroupRole(), + "databricks_group_member": resourceGroupMember(), "databricks_notebook": resourceNotebook(), "databricks_cluster": resourceCluster(), "databricks_job": resourceJob(), diff --git a/databricks/resource_databricks_group.go b/databricks/resource_databricks_group.go new file mode 100644 index 000000000..8992adbdd --- /dev/null +++ b/databricks/resource_databricks_group.go @@ -0,0 
+1,152 @@ +package databricks + +import ( + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" +) + +func resourceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceGroupCreate, + Update: resourceGroupUpdate, + Read: resourceGroupRead, + Delete: resourceGroupDelete, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "allow_cluster_create": { + Deprecated: "Will be deprecated in a future release for general permissions api", + Type: schema.TypeBool, + Optional: true, + }, + "allow_instance_pool_create": { + Deprecated: "Will be deprecated in a future release for general permissions api", + Type: schema.TypeBool, + Optional: true, + }, + }, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + } +} + + +func resourceGroupCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + groupName := d.Get("display_name").(string) + allowClusterCreate := d.Get("allow_cluster_create").(bool) + allowInstancePoolCreate := d.Get("allow_instance_pool_create").(bool) + + // If entitlement flags are set to be true + var entitlementsList []string + if allowClusterCreate { + entitlementsList = append(entitlementsList, string(model.AllowClusterCreateEntitlement)) + } + if allowInstancePoolCreate { + entitlementsList = append(entitlementsList, string(model.AllowInstancePoolCreateEntitlement)) + } + + group, err := client.Groups().Create(groupName, nil, nil, entitlementsList) + if err != nil { + return err + } + d.SetId(group.ID) + return resourceGroupRead(d, m) +} + +func resourceGroupRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + group, err := client.Groups().Read(id) + if err != nil { + if isScimGroupMissing(err.Error(), id) { + log.Printf("Missing scim group with id: %s.", id) + d.SetId("") + return nil + } + return err + } + + err = d.Set("display_name", group.DisplayName) + if err != nil { + return err + } + + err = d.Set("allow_cluster_create",isGroupClusterCreateEntitled(&group)) + if err != nil { + return err + } + + err = d.Set("allow_instance_pool_create", isGroupInstancePoolCreateEntitled(&group)) + return err +} + +func resourceGroupUpdate(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + + // Handle entitlements update + var entitlementsAddList []string + var entitlementsRemoveList []string + // If allow_cluster_create has changed + if d.HasChange("allow_cluster_create") { + allowClusterCreate := d.Get("allow_cluster_create").(bool) + // Changed to true + if allowClusterCreate { + entitlementsAddList = append(entitlementsAddList, string(model.AllowClusterCreateEntitlement)) + } + // Changed to false + entitlementsRemoveList = append(entitlementsRemoveList, string(model.AllowClusterCreateEntitlement)) + } + // If allow_instance_pool_create has changed + if d.HasChange("allow_instance_pool_create") { + allowClusterCreate := d.Get("allow_instance_pool_create").(bool) + // Changed to true + if allowClusterCreate { + entitlementsAddList = append(entitlementsAddList, string(model.AllowClusterCreateEntitlement)) + } + // Changed to false + entitlementsRemoveList = append(entitlementsRemoveList, string(model.AllowClusterCreateEntitlement)) + } + + if entitlementsAddList != nil || 
entitlementsRemoveList != nil { + err := client.Groups().Patch(id, entitlementsAddList, entitlementsRemoveList, model.GroupEntitlementsPath) + if err != nil { + return err + } + } + + return nil +} + +func resourceGroupDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + err := client.Groups().Delete(id) + return err +} + +func isGroupClusterCreateEntitled(group *model.Group) bool { + for _, entitlement := range(group.Entitlements) { + if entitlement.Value == model.AllowClusterCreateEntitlement { + return true + } + } + return false +} + +func isGroupInstancePoolCreateEntitled(group *model.Group) bool { + for _, entitlement := range(group.Entitlements) { + if entitlement.Value == model.AllowClusterCreateEntitlement { + return true + } + } + return false +} \ No newline at end of file diff --git a/databricks/resource_databricks_group_aws_test.go b/databricks/resource_databricks_group_aws_test.go new file mode 100644 index 000000000..b20ad6e35 --- /dev/null +++ b/databricks/resource_databricks_group_aws_test.go @@ -0,0 +1,162 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAWSGroupResource(t *testing.T) { + var Group model.Group + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. + // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + displayName := fmt.Sprintf("tf group test %s", randomStr) + newDisplayName := fmt.Sprintf("new tf group test %s", randomStr) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAWSGroupResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAWSDatabricksGroup(displayName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupResourceExists("databricks_group.my_group", &Group, t), + // verify remote values + testAWSGroupValues(t, &Group, displayName,), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", displayName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAWSDatabricksGroup(newDisplayName), + // test to see if new resource is attempted to be planned + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + { + ResourceName: "databricks_group.my_group", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSGroupResource_verify_entitlements(t *testing.T) { + var Group model.Group + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + displayName := fmt.Sprintf("tf group test %s", randomStr) + newDisplayName := fmt.Sprintf("new tf group test %s", randomStr) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAWSGroupResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAWSDatabricksGroupEntitlements(displayName, "true", "true"), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupResourceExists("databricks_group.my_group", &Group, t), + // verify remote values + testAWSGroupValues(t, &Group, displayName,), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "allow_cluster_create", "true"), + resource.TestCheckResourceAttr("databricks_group.my_group", "allow_instance_pool_create", "true"), + ), + Destroy: false, + }, + // Remove entitlements and expect a non empty plan + { + // use a dynamic configuration with the random name from above + Config: testAWSDatabricksGroup(newDisplayName), + // test to see if new resource is attempted to be planned + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + }, + }) +} + +func testAWSGroupResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_group" { + continue + } + _, err := client.Users().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("resource Group is not cleaned up") + } + return nil +} + +func testAWSGroupValues(t *testing.T, group *model.Group, displayName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, group.DisplayName == displayName) + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
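// (In this test it reads back the group recorded in Terraform state via conn.Groups().Read
// and copies the response into *group so that later check functions can assert on it.)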
+func testAWSGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.Groups().Read(rs.Primary.ID) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *group = resp + return nil + } +} + +func testAWSDatabricksGroup(groupName string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%s" + } + `, groupName) +} + +func testAWSDatabricksGroupEntitlements(groupName, allowClusterCreate, allowPoolCreate string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%s" + allow_cluster_create = %s + allow_instance_pool_create = %s + } + `, groupName, allowClusterCreate, allowPoolCreate) +} diff --git a/databricks/resource_databricks_group_azure_test.go b/databricks/resource_databricks_group_azure_test.go new file mode 100644 index 000000000..489447693 --- /dev/null +++ b/databricks/resource_databricks_group_azure_test.go @@ -0,0 +1,158 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAzureGroupResource(t *testing.T) { + var Group model.Group + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + displayName := fmt.Sprintf("tf group test %s", randomStr) + newDisplayName := fmt.Sprintf("new tf group test %s", randomStr) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAzureGroupResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAzureDatabricksGroup(displayName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureGroupResourceExists("databricks_group.my_group", &Group, t), + // verify remote values + testAzureGroupValues(t, &Group, displayName,), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", displayName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAzureDatabricksGroup(newDisplayName), + // test to see if new resource is attempted to be planned + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + }, + }) +} + +func TestAccAzureGroupResource_verify_entitlements(t *testing.T) { + //var secretScope model.Secre + var Group model.Group + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. + // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + //scope := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) + displayName := fmt.Sprintf("tf group test %s", randomStr) + newDisplayName := fmt.Sprintf("new tf group test %s", randomStr) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAzureGroupResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAzureDatabricksGroupEntitlements(displayName, "true", "true"), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureGroupResourceExists("databricks_group.my_group", &Group, t), + // verify remote values + testAzureGroupValues(t, &Group, displayName,), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "allow_cluster_create", "true"), + resource.TestCheckResourceAttr("databricks_group.my_group", "allow_instance_pool_create", "true"), + ), + Destroy: false, + }, + // Remove entitlements and expect a non empty plan + { + // use a dynamic configuration with the random name from above + Config: testAzureDatabricksGroup(newDisplayName), + // test to see if new resource is attempted to be planned + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + }, + }) +} + +func testAzureGroupResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_group" { + continue + } + _, err := client.Users().Read(rs.Primary.ID) + if err != nil { + return nil + } + 
return errors.New("resource Group is not cleaned up") + } + return nil +} + +func testAzureGroupValues(t *testing.T, group *model.Group, displayName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, group.DisplayName == displayName) + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAzureGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.Groups().Read(rs.Primary.ID) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *group = resp + return nil + } +} + +func testAzureDatabricksGroup(groupName string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%s" + } + `, groupName) +} + +func testAzureDatabricksGroupEntitlements(groupName, allowClusterCreate, allowPoolCreate string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%s" + allow_cluster_create = %s + allow_instance_pool_create = %s + } + `, groupName, allowClusterCreate, allowPoolCreate) +} diff --git a/databricks/resource_databricks_group_member.go b/databricks/resource_databricks_group_member.go new file mode 100644 index 000000000..37d20b762 --- /dev/null +++ b/databricks/resource_databricks_group_member.go @@ -0,0 +1,126 @@ +package databricks + +import ( + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "strings" +) + +// Use this to manage individual members of a particular group +func resourceGroupMember() *schema.Resource { + return &schema.Resource{ + Create: resourceGroupMemberCreate, + Read: resourceGroupMemberRead, + Delete: resourceGroupMemberDelete, + + Schema: map[string]*schema.Schema{ + "group_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "member_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + }, + } +} + + + +func resourceGroupMemberCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + groupID := d.Get("group_id").(string) + memberID := d.Get("member_id").(string) + + groupMemberID := &GroupMemberID{ + GroupID: groupID, + MemberID: memberID, + } + + memberAddList := []string{groupMemberID.MemberID} + err := client.Groups().Patch(groupMemberID.GroupID, memberAddList, nil, model.GroupMembersPath) + if err != nil { + return err + } + + d.SetId(groupMemberID.String()) + return resourceGroupMemberRead(d, m) +} + +func resourceGroupMemberRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + groupMemberID := parseGroupMemberID(id) + group, err := client.Groups().Read(groupMemberID.GroupID) + + // First verify if the group exists + if err != nil { + if isScimGroupMissing(err.Error(), id) { + log.Printf("Missing group with id: %s.", id) + d.SetId("") + return nil + } + return err + } + + // Set Id to null if instance profile is not in group + if !iMemberInGroup(groupMemberID.MemberID, &group) { + log.Printf("Missing member 
%s in group with id: %s.", groupMemberID.MemberID, groupMemberID.GroupID) + d.SetId("") + return nil + } + + err = d.Set("group_id", groupMemberID.GroupID) + if err != nil { + return err + } + + err = d.Set("member_id", groupMemberID.MemberID) + return err +} + + + +func resourceGroupMemberDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + groupMemberID := parseGroupMemberID(id) + + memberRemoveList := []string{groupMemberID.MemberID} + // Patch op to remove member from group + err := client.Groups().Patch(groupMemberID.GroupID, nil, memberRemoveList, model.GroupMembersPath) + return err +} + + +type GroupMemberID struct { + GroupID string + MemberID string +} + +func (g GroupMemberID) String() string { + return fmt.Sprintf("%s|%s", g.GroupID, g.MemberID) +} + +func parseGroupMemberID(id string) *GroupMemberID { + parts := strings.Split(id, "|") + return &GroupMemberID{ + GroupID:parts[0], + MemberID:parts[1], + } +} + +func iMemberInGroup(member string, group *model.Group) bool { + for _, groupMember := range(group.Members) { + if groupMember.Value == member { + return true + } + } + return false +} \ No newline at end of file diff --git a/databricks/resource_databricks_group_member_aws_test.go b/databricks/resource_databricks_group_member_aws_test.go new file mode 100644 index 000000000..b2748a51e --- /dev/null +++ b/databricks/resource_databricks_group_member_aws_test.go @@ -0,0 +1,196 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAWSGroupMemberResource(t *testing.T) { + var group model.Group + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + + groupName := "group test" + var manuallyCreatedGroup *model.Group + defer func() { + client := testAccProvider.Meta().(*service.DBApiClient) + if client != nil && manuallyCreatedGroup != nil { + client.Groups().Delete(manuallyCreatedGroup.ID) + } + }() + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAWSGroupMemberResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + // Creates 2 sub groups and adds as members to parent group + Config: testAWSGroupMemberResource(groupName), + + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values + testAWSGroupMemberValues(t, &group, groupName, 2), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + Destroy: false, + }, + { + PreConfig: func() { + // manually create subgroup c + client := testAccProvider.Meta().(*service.DBApiClient) + subGroupC, _ := client.Groups().Create("manually-created-group", nil, nil, nil) + manuallyCreatedGroup = &subGroupC + // Add new subgroup to current group + client.Groups().Patch(group.ID, []string{manuallyCreatedGroup.ID}, nil, model.GroupMembersPath) + }, + Config: testAWSGroupMemberResource(groupName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values with manually added group + testAWSGroupMemberValues(t, &group, groupName, 3), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + }, + { + // use a dynamic configuration with the random name from above + Config: testAWSGroupMemberResourceCreateNoMembers(groupName), + + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values (we added a group manually in the prior test reflecting scim sync) + testAWSGroupMemberValues(t, &group, groupName, 1), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAWSGroupMemberResourceCreateNoMembers(groupName), + + // Test behavior to expect to attempt to create new role mapping because role is gone + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + client.Groups().Delete(group.ID) + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAWSGroupMemberResource(groupName), + + // Lets delete the manually created group + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + client.Groups().Delete(manuallyCreatedGroup.ID) + manuallyCreatedGroup = nil + }, + // compose a basic test, checking both remote and local 
values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values + testAWSGroupMemberValues(t, &group, groupName, 2), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + Destroy: false, + }, + }, + }) +} + +func testAWSGroupMemberResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_group" { + continue + } + _, err := client.Users().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("resource Group is not cleaned up") + } + return nil +} + +func testAWSGroupMemberValues(t *testing.T, group *model.Group, displayName string, memberCount int) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, group.DisplayName == displayName) + assert.Equal(t, memberCount,len(group.Members), "member count is not matching") + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAWSGroupMemberResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.Groups().Read(rs.Primary.ID) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *group = resp + return nil + } +} + +func testAWSGroupMemberResourceCreateNoMembers(groupName string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%s" + } + `, groupName) +} + +func testAWSGroupMemberResource(groupName string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%[1]s" + } + resource "databricks_group" "my_sub_group_a" { + display_name = "sub_a_%[1]s" + } + resource "databricks_group" "my_sub_group_b" { + display_name = "sub_b_%[1]s" + } + resource "databricks_group_member" "my_member_a" { + group_id = databricks_group.my_group.id + member_id = databricks_group.my_sub_group_a.id + } + resource "databricks_group_member" "my_member_b" { + group_id = databricks_group.my_group.id + member_id = databricks_group.my_sub_group_b.id + } + `, groupName) +} diff --git a/databricks/resource_databricks_group_member_azure_test.go b/databricks/resource_databricks_group_member_azure_test.go new file mode 100644 index 000000000..ec0d3fded --- /dev/null +++ b/databricks/resource_databricks_group_member_azure_test.go @@ -0,0 +1,196 @@ +package databricks + +import ( + "errors" + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccAzureGroupMemberResource(t *testing.T) { + var group model.Group + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + + groupName := "group test" + var manuallyCreatedGroup *model.Group + defer func() { + client := testAccProvider.Meta().(*service.DBApiClient) + if client != nil && manuallyCreatedGroup != nil { + client.Groups().Delete(manuallyCreatedGroup.ID) + } + }() + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAzureGroupMemberResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + // Creates 2 sub groups and adds as members to parent group + Config: testAzureGroupMemberResource(groupName), + + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values + testAzureGroupMemberValues(t, &group, groupName, 2), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + Destroy: false, + }, + { + PreConfig: func() { + // manually create subgroup c + client := testAccProvider.Meta().(*service.DBApiClient) + subGroupC, _ := client.Groups().Create("manually-created-group", nil, nil, nil) + manuallyCreatedGroup = &subGroupC + // Add new subgroup to current group + client.Groups().Patch(group.ID, []string{manuallyCreatedGroup.ID}, nil, model.GroupMembersPath) + }, + Config: testAzureGroupMemberResource(groupName), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values with manually added group + testAzureGroupMemberValues(t, &group, groupName, 3), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + }, + { + // use a dynamic configuration with the random name from above + Config: testAzureGroupMemberResourceCreateNoMembers(groupName), + + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values (we added a group manually in the prior test reflecting scim sync) + testAzureGroupMemberValues(t, &group, groupName, 1), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAzureGroupMemberResourceCreateNoMembers(groupName), + + // Test behavior to expect to attempt to create new role mapping because role is gone + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + client.Groups().Delete(group.ID) + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAzureGroupMemberResource(groupName), + + // Lets delete the manually created group + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + client.Groups().Delete(manuallyCreatedGroup.ID) + manuallyCreatedGroup = nil + }, + // compose a basic test, checking 
both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAzureGroupMemberResourceExists("databricks_group.my_group", &group, t), + // verify remote values + testAzureGroupMemberValues(t, &group, groupName, 2), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + ), + Destroy: false, + }, + }, + }) +} + +func testAzureGroupMemberResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_group" { + continue + } + _, err := client.Users().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("resource Group is not cleaned up") + } + return nil +} + +func testAzureGroupMemberValues(t *testing.T, group *model.Group, displayName string, memberCount int) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, group.DisplayName == displayName) + assert.Equal(t, memberCount,len(group.Members), "member count is not matching") + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAzureGroupMemberResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(*service.DBApiClient) + resp, err := conn.Groups().Read(rs.Primary.ID) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *group = resp + return nil + } +} + +func testAzureGroupMemberResourceCreateNoMembers(groupName string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%s" + } + `, groupName) +} + +func testAzureGroupMemberResource(groupName string) string { + return fmt.Sprintf(` + resource "databricks_group" "my_group" { + display_name = "%[1]s" + } + resource "databricks_group" "my_sub_group_a" { + display_name = "sub_a_%[1]s" + } + resource "databricks_group" "my_sub_group_b" { + display_name = "sub_b_%[1]s" + } + resource "databricks_group_member" "my_member_a" { + group_id = databricks_group.my_group.id + member_id = databricks_group.my_sub_group_a.id + } + resource "databricks_group_member" "my_member_b" { + group_id = databricks_group.my_group.id + member_id = databricks_group.my_sub_group_b.id + } + `, groupName) +} diff --git a/databricks/resource_databricks_group_role.go b/databricks/resource_databricks_group_role.go new file mode 100644 index 000000000..0cff9f3a3 --- /dev/null +++ b/databricks/resource_databricks_group_role.go @@ -0,0 +1,123 @@ +package databricks + +import ( + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "strings" +) + +func resourceGroupRole() *schema.Resource { + return &schema.Resource{ + Create: resourceGroupRoleCreate, + Read: resourceGroupRoleRead, + Delete: resourceGroupRoleDelete, + + Schema: map[string]*schema.Schema{ + "group_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "instance_profile_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + 
+				ValidateFunc: ValidateInstanceProfileARN,
+			},
+		},
+	}
+}
+
+
+
+func resourceGroupRoleCreate(d *schema.ResourceData, m interface{}) error {
+	client := m.(*service.DBApiClient)
+	groupID := d.Get("group_id").(string)
+	instanceProfileID := d.Get("instance_profile_id").(string)
+	groupRoleID := &GroupRoleID{
+		GroupID: groupID,
+		InstanceProfileID: instanceProfileID,
+	}
+
+	roleAddList := []string{groupRoleID.InstanceProfileID}
+	err := client.Groups().Patch(groupRoleID.GroupID, roleAddList, nil, model.GroupRolesPath)
+	if err != nil {
+		return err
+	}
+
+	d.SetId(groupRoleID.String())
+	return resourceGroupRoleRead(d, m)
+}
+
+func resourceGroupRoleRead(d *schema.ResourceData, m interface{}) error {
+	id := d.Id()
+	client := m.(*service.DBApiClient)
+	groupRoleID := parseGroupRoleID(id)
+	group, err := client.Groups().Read(groupRoleID.GroupID)
+
+	// First verify if the group exists
+	if err != nil {
+		if isScimGroupMissing(err.Error(), groupRoleID.GroupID) {
+			log.Printf("Missing group with id: %s.", groupRoleID.GroupID)
+			d.SetId("")
+			return nil
+		}
+		return err
+	}
+
+	// Set Id to null if instance profile is not in group
+	if !iRoleInGroup(groupRoleID.InstanceProfileID, &group) {
+		log.Printf("Missing role %s in group with id: %s.", groupRoleID.InstanceProfileID, groupRoleID.GroupID)
+		d.SetId("")
+		return nil
+	}
+
+	err = d.Set("group_id", groupRoleID.GroupID)
+	if err != nil {
+		return err
+	}
+
+	err = d.Set("instance_profile_id", groupRoleID.InstanceProfileID)
+	return err
+}
+
+func resourceGroupRoleDelete(d *schema.ResourceData, m interface{}) error {
+	id := d.Id()
+	client := m.(*service.DBApiClient)
+	groupRoleID := parseGroupRoleID(id)
+
+	roleRemoveList := []string{groupRoleID.InstanceProfileID}
+	// Patch op to remove role from group
+	err := client.Groups().Patch(groupRoleID.GroupID, nil, roleRemoveList, model.GroupRolesPath)
+	return err
+}
+
+
+type GroupRoleID struct {
+	GroupID string
+	InstanceProfileID string
+}
+
+func (g GroupRoleID) String() string {
+	return fmt.Sprintf("%s|%s", g.GroupID, g.InstanceProfileID)
+}
+
+func parseGroupRoleID(id string) *GroupRoleID {
+	parts := strings.Split(id, "|")
+	return &GroupRoleID{
+		GroupID:parts[0],
+		InstanceProfileID:parts[1],
+	}
+}
+
+func iRoleInGroup(role string, group *model.Group) bool {
+	for _, groupRole := range(group.Roles) {
+		if groupRole.Value == role {
+			return true
+		}
+	}
+	return false
+}
\ No newline at end of file
diff --git a/databricks/resource_databricks_group_role_aws_test.go b/databricks/resource_databricks_group_role_aws_test.go
new file mode 100644
index 000000000..5e06d73e1
--- /dev/null
+++ b/databricks/resource_databricks_group_role_aws_test.go
@@ -0,0 +1,134 @@
+package databricks
+
+import (
+	"errors"
+	"fmt"
+	"github.com/databrickslabs/databricks-terraform/client/model"
+	"github.com/databrickslabs/databricks-terraform/client/service"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestAccAWSGroupRoleResource(t *testing.T) {
+	var group model.Group
+	// generate a random name for each tokenInfo test run, to avoid
+	// collisions from multiple concurrent tests.
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + + groupName := "group test" + role := "arn:aws:iam::999999999999:instance-profile/terraform-group-test" + + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAWSGroupRoleResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAWSGroupRoleResourceCreate(role, groupName), + + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAWSGroupRoleResourceExists("databricks_group.my_group", &group, t), + // verify remote values + testAWSGroupRoleValues(t, &group, groupName, role), + // verify local values + resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), + resource.TestCheckResourceAttr("databricks_group_role.my_group_role", "instance_profile_id", role), + + ), + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAWSGroupRoleResourceCreate(role, groupName), + + // Test behavior to expect to attempt to create new role mapping because role is gone + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + client.InstanceProfiles().Delete(role) + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + { + // use a dynamic configuration with the random name from above + Config: testAWSGroupRoleResourceCreate(role, groupName), + + // Test behavior to expect to attempt to create new role mapping because role is gone + PreConfig: func() { + client := testAccProvider.Meta().(*service.DBApiClient) + client.Groups().Delete(group.ID) + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + Destroy: false, + }, + }, + }) +} + +func testAWSGroupRoleResourceDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "databricks_group" { + continue + } + _, err := client.Users().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("resource Group is not cleaned up") + } + return nil +} + +func testAWSGroupRoleValues(t *testing.T, group *model.Group, displayName, role string) resource.TestCheckFunc { + return func(s *terraform.State) error { + assert.True(t, group.DisplayName == displayName) + assert.True(t, iRoleInGroup(role, group), "role is not in group") + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
+func testAWSGroupRoleResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		// find the corresponding state object
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		// retrieve the configured client from the test setup
+		conn := testAccProvider.Meta().(*service.DBApiClient)
+		resp, err := conn.Groups().Read(rs.Primary.ID)
+		if err != nil {
+			return err
+		}
+
+		// If no error, assign the response Widget attribute to the widget pointer
+		*group = resp
+		return nil
+	}
+}
+
+func testAWSGroupRoleResourceCreate(roleArn, groupName string) string {
+	return fmt.Sprintf(`
+	resource "databricks_instance_profile" "instance_profile" {
+		instance_profile_arn = "%s"
+		skip_validation = true
+	}
+	resource "databricks_group" "my_group" {
+		display_name = "%s"
+	}
+	resource "databricks_group_role" "my_group_role" {
+		group_id = databricks_group.my_group.id
+		instance_profile_id = databricks_instance_profile.instance_profile.id
+	}
+	`, roleArn, groupName)
+}
diff --git a/databricks/utils.go b/databricks/utils.go
index a4a53c55a..5e7f67f3c 100644
--- a/databricks/utils.go
+++ b/databricks/utils.go
@@ -2,6 +2,7 @@ package databricks
 
 import (
 	"fmt"
+	"github.com/aws/aws-sdk-go/aws/arn"
 	"strings"
 	"time"
 
@@ -98,3 +99,24 @@ func unpackMWSAccountId(combined string) (PackagedMWSIds, error) {
 	packagedMWSIds.ResourceId = parts[1]
 	return packagedMWSIds, nil
 }
+
+// ValidateInstanceProfileARN is a ValidateFunc that ensures the role id is a valid aws iam instance profile arn
+func ValidateInstanceProfileARN(val interface{}, key string) (warns []string, errs []error) {
+	v := val.(string)
+
+	if v == "" {
+		return nil, []error{fmt.Errorf("%s is empty got: %s, must be an aws instance profile arn", key, v, )}
+	}
+
+	// Parse and verify instance profiles
+	instanceProfileArn, err := arn.Parse(v)
+	if err != nil {
+		return nil, []error{fmt.Errorf("%s is invalid got: %s received error: %w", key, v, err)}
+	}
+	// Verify instance profile resource type, Resource gets parsed as instance-profile/
+	if !strings.HasPrefix(instanceProfileArn.Resource, "instance-profile") {
+		return nil, []error{fmt.Errorf("%s must be an instance profile resource, got: %s in %s",
+			key, instanceProfileArn.Resource , v)}
+	}
+	return nil, nil
+}
diff --git a/databricks/utils_test.go b/databricks/utils_test.go
index 4bd9748af..d92a01d47 100644
--- a/databricks/utils_test.go
+++ b/databricks/utils_test.go
@@ -29,3 +29,22 @@ func TestIsClusterMissingFalseWhenErrorNotInCorrectFormat(t *testing.T) {
 
 	assert.False(t, result)
 }
+
+func TestValidateInstanceProfileARN(t *testing.T) {
+	testCases := []struct {
+		instanceProfileARN string
+		errorCount int
+	}{
+		{"arn:aws:iam::999999999999:instance-profile/my-fake-instance-profile", 0},
+		{"arn:aws:iam::999999999999:role/not-an-instance-profile", 1},
+		{"", 1},
+		{"invalid-profile", 1},
+	}
+	for _, tc := range testCases {
+		_, errs := ValidateInstanceProfileARN(tc.instanceProfileARN, "key")
+
+		assert.Lenf(t, errs, tc.errorCount, "directory '%s' does not generate the expected error count", tc.instanceProfileARN)
+	}
+}
+
+
diff --git a/go.mod b/go.mod
index 9a1bd8230..d29742a43 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@ go 1.13
 require (
 	github.com/Azure/go-autorest/autorest v0.10.2
 	github.com/Azure/go-autorest/autorest/adal v0.8.3
+	github.com/aws/aws-sdk-go v1.32.1
 	github.com/fatih/color v1.9.0 // indirect
 	github.com/google/go-querystring v1.0.0
 	github.com/hashicorp/go-retryablehttp
v0.6.6 diff --git a/go.sum b/go.sum index 7863eb730..592ae4086 100644 --- a/go.sum +++ b/go.sum @@ -37,6 +37,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.1 h1:0dy5DkMKNPH9mLWveAWA9ZTiKIEEvJJA6fbe0eCs19k= +github.com/aws/aws-sdk-go v1.32.1/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= @@ -57,6 +59,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -148,6 +151,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= @@ -297,6 +302,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= From 83f6e80aa57987b3db9ef31d05f850978343a93a Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 06:21:51 -0400 Subject: [PATCH 05/13] 
format and linting errors resolved --- databricks/mounts_test.go | 24 +++++++++---------- databricks/provider.go | 16 ++++++------- ...source_databricks_azure_adls_gen1_mount.go | 2 +- ...source_databricks_azure_adls_gen2_mount.go | 8 +++---- .../resource_databricks_azure_blob_mount.go | 2 +- databricks/resource_databricks_group.go | 17 +++++++------ .../resource_databricks_group_aws_test.go | 14 +++++------ .../resource_databricks_group_azure_test.go | 12 +++++----- .../resource_databricks_group_member.go | 17 +++++-------- ...source_databricks_group_member_aws_test.go | 22 ++++++++++------- ...urce_databricks_group_member_azure_test.go | 18 ++++++++------ databricks/resource_databricks_group_role.go | 15 +++++------- ...resource_databricks_group_role_aws_test.go | 15 ++++++------ databricks/utils.go | 4 ++-- databricks/utils_test.go | 6 ++--- 15 files changed, 95 insertions(+), 97 deletions(-) diff --git a/databricks/mounts_test.go b/databricks/mounts_test.go index ac6c2d7f0..a8dd4f7eb 100644 --- a/databricks/mounts_test.go +++ b/databricks/mounts_test.go @@ -8,16 +8,16 @@ import ( func TestValidateMountDirectory(t *testing.T) { testCases := []struct { - directory string - errorCount int - }{ - {"", 0}, - {"/directory", 0}, - {"directory", 1}, - } - for _, tc := range testCases { - _, errs := ValidateMountDirectory(tc.directory, "key") - - assert.Lenf(t, errs, tc.errorCount, "directory '%s' does not generate the expected error count", tc.directory) - } + directory string + errorCount int + }{ + {"", 0}, + {"/directory", 0}, + {"directory", 1}, + } + for _, tc := range testCases { + _, errs := ValidateMountDirectory(tc.directory, "key") + + assert.Lenf(t, errs, tc.errorCount, "directory '%s' does not generate the expected error count", tc.directory) + } } diff --git a/databricks/provider.go b/databricks/provider.go index f4ab5c66d..5bf2a20b1 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -26,15 +26,15 @@ func Provider(version string) terraform.ResourceProvider { "databricks_zones": dataSourceClusterZones(), }, ResourcesMap: map[string]*schema.Resource{ - "databricks_token": resourceToken(), - "databricks_secret_scope": resourceSecretScope(), - "databricks_secret": resourceSecret(), - "databricks_secret_acl": resourceSecretACL(), - "databricks_instance_pool": resourceInstancePool(), - "databricks_scim_user": resourceScimUser(), - "databricks_scim_group": resourceScimGroup(), + "databricks_token": resourceToken(), + "databricks_secret_scope": resourceSecretScope(), + "databricks_secret": resourceSecret(), + "databricks_secret_acl": resourceSecretACL(), + "databricks_instance_pool": resourceInstancePool(), + "databricks_scim_user": resourceScimUser(), + "databricks_scim_group": resourceScimGroup(), // Scim Group is split into multiple components for flexibility to pick and choose - "databricks_group": resourceGroup(), + "databricks_group": resourceGroup(), "databricks_group_role": resourceGroupRole(), "databricks_group_member": resourceGroupMember(), "databricks_notebook": resourceNotebook(), diff --git a/databricks/resource_databricks_azure_adls_gen1_mount.go b/databricks/resource_databricks_azure_adls_gen1_mount.go index 4d6d28bd6..ee798a46f 100644 --- a/databricks/resource_databricks_azure_adls_gen1_mount.go +++ b/databricks/resource_databricks_azure_adls_gen1_mount.go @@ -39,7 +39,7 @@ func resourceAzureAdlsGen1Mount() *schema.Resource { Optional: true, Computed: true, //Default: "/", - ForceNew: true, + ForceNew: true, ValidateFunc: ValidateMountDirectory, }, 
"mount_name": { diff --git a/databricks/resource_databricks_azure_adls_gen2_mount.go b/databricks/resource_databricks_azure_adls_gen2_mount.go index 36388f7e7..4991dc836 100644 --- a/databricks/resource_databricks_azure_adls_gen2_mount.go +++ b/databricks/resource_databricks_azure_adls_gen2_mount.go @@ -32,10 +32,10 @@ func resourceAzureAdlsGen2Mount() *schema.Resource { ForceNew: true, }, "directory": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, ValidateFunc: ValidateMountDirectory, }, "mount_name": { diff --git a/databricks/resource_databricks_azure_blob_mount.go b/databricks/resource_databricks_azure_blob_mount.go index d9b6e9ba7..ac4143390 100644 --- a/databricks/resource_databricks_azure_blob_mount.go +++ b/databricks/resource_databricks_azure_blob_mount.go @@ -37,7 +37,7 @@ func resourceAzureBlobMount() *schema.Resource { Optional: true, Computed: true, //Default: "/", - ForceNew: true, + ForceNew: true, ValidateFunc: ValidateMountDirectory, }, "mount_name": { diff --git a/databricks/resource_databricks_group.go b/databricks/resource_databricks_group.go index 8992adbdd..b8d502619 100644 --- a/databricks/resource_databricks_group.go +++ b/databricks/resource_databricks_group.go @@ -22,13 +22,13 @@ func resourceGroup() *schema.Resource { }, "allow_cluster_create": { Deprecated: "Will be deprecated in a future release for general permissions api", - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, }, "allow_instance_pool_create": { Deprecated: "Will be deprecated in a future release for general permissions api", - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, }, }, Importer: &schema.ResourceImporter{ @@ -37,7 +37,6 @@ func resourceGroup() *schema.Resource { } } - func resourceGroupCreate(d *schema.ResourceData, m interface{}) error { client := m.(*service.DBApiClient) groupName := d.Get("display_name").(string) @@ -79,7 +78,7 @@ func resourceGroupRead(d *schema.ResourceData, m interface{}) error { return err } - err = d.Set("allow_cluster_create",isGroupClusterCreateEntitled(&group)) + err = d.Set("allow_cluster_create", isGroupClusterCreateEntitled(&group)) if err != nil { return err } @@ -134,7 +133,7 @@ func resourceGroupDelete(d *schema.ResourceData, m interface{}) error { } func isGroupClusterCreateEntitled(group *model.Group) bool { - for _, entitlement := range(group.Entitlements) { + for _, entitlement := range group.Entitlements { if entitlement.Value == model.AllowClusterCreateEntitlement { return true } @@ -143,10 +142,10 @@ func isGroupClusterCreateEntitled(group *model.Group) bool { } func isGroupInstancePoolCreateEntitled(group *model.Group) bool { - for _, entitlement := range(group.Entitlements) { + for _, entitlement := range group.Entitlements { if entitlement.Value == model.AllowClusterCreateEntitlement { return true } } return false -} \ No newline at end of file +} diff --git a/databricks/resource_databricks_group_aws_test.go b/databricks/resource_databricks_group_aws_test.go index b20ad6e35..675f3b3d1 100644 --- a/databricks/resource_databricks_group_aws_test.go +++ b/databricks/resource_databricks_group_aws_test.go @@ -22,7 +22,7 @@ func TestAccAWSGroupResource(t *testing.T) { randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum) displayName := fmt.Sprintf("tf group test %s", randomStr) newDisplayName := fmt.Sprintf("new tf group test %s", randomStr) - 
resource.Test(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ Providers: testAccProviders, CheckDestroy: testAWSGroupResourceDestroy, Steps: []resource.TestStep{ @@ -34,7 +34,7 @@ func TestAccAWSGroupResource(t *testing.T) { // query the API to retrieve the tokenInfo object testAWSGroupResourceExists("databricks_group.my_group", &Group, t), // verify remote values - testAWSGroupValues(t, &Group, displayName,), + testAWSGroupValues(t, &Group, displayName), // verify local values resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", displayName), ), @@ -44,9 +44,9 @@ func TestAccAWSGroupResource(t *testing.T) { // use a dynamic configuration with the random name from above Config: testAWSDatabricksGroup(newDisplayName), // test to see if new resource is attempted to be planned - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, { ResourceName: "databricks_group.my_group", @@ -79,7 +79,7 @@ func TestAccAWSGroupResource_verify_entitlements(t *testing.T) { // query the API to retrieve the tokenInfo object testAWSGroupResourceExists("databricks_group.my_group", &Group, t), // verify remote values - testAWSGroupValues(t, &Group, displayName,), + testAWSGroupValues(t, &Group, displayName), // verify local values resource.TestCheckResourceAttr("databricks_group.my_group", "allow_cluster_create", "true"), resource.TestCheckResourceAttr("databricks_group.my_group", "allow_instance_pool_create", "true"), @@ -91,9 +91,9 @@ func TestAccAWSGroupResource_verify_entitlements(t *testing.T) { // use a dynamic configuration with the random name from above Config: testAWSDatabricksGroup(newDisplayName), // test to see if new resource is attempted to be planned - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, }, }) diff --git a/databricks/resource_databricks_group_azure_test.go b/databricks/resource_databricks_group_azure_test.go index 489447693..984fb666d 100644 --- a/databricks/resource_databricks_group_azure_test.go +++ b/databricks/resource_databricks_group_azure_test.go @@ -34,7 +34,7 @@ func TestAccAzureGroupResource(t *testing.T) { // query the API to retrieve the tokenInfo object testAzureGroupResourceExists("databricks_group.my_group", &Group, t), // verify remote values - testAzureGroupValues(t, &Group, displayName,), + testAzureGroupValues(t, &Group, displayName), // verify local values resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", displayName), ), @@ -44,9 +44,9 @@ func TestAccAzureGroupResource(t *testing.T) { // use a dynamic configuration with the random name from above Config: testAzureDatabricksGroup(newDisplayName), // test to see if new resource is attempted to be planned - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, }, }) @@ -75,7 +75,7 @@ func TestAccAzureGroupResource_verify_entitlements(t *testing.T) { // query the API to retrieve the tokenInfo object testAzureGroupResourceExists("databricks_group.my_group", &Group, t), // verify remote values - testAzureGroupValues(t, &Group, displayName,), + testAzureGroupValues(t, &Group, displayName), // verify local values resource.TestCheckResourceAttr("databricks_group.my_group", "allow_cluster_create", "true"), resource.TestCheckResourceAttr("databricks_group.my_group", "allow_instance_pool_create", "true"), @@ -87,9 +87,9 @@ func TestAccAzureGroupResource_verify_entitlements(t *testing.T) { // use a dynamic configuration with the 
random name from above Config: testAzureDatabricksGroup(newDisplayName), // test to see if new resource is attempted to be planned - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, }, }) diff --git a/databricks/resource_databricks_group_member.go b/databricks/resource_databricks_group_member.go index 37d20b762..33fbfbdd7 100644 --- a/databricks/resource_databricks_group_member.go +++ b/databricks/resource_databricks_group_member.go @@ -31,15 +31,13 @@ func resourceGroupMember() *schema.Resource { } } - - func resourceGroupMemberCreate(d *schema.ResourceData, m interface{}) error { client := m.(*service.DBApiClient) groupID := d.Get("group_id").(string) memberID := d.Get("member_id").(string) groupMemberID := &GroupMemberID{ - GroupID: groupID, + GroupID: groupID, MemberID: memberID, } @@ -85,8 +83,6 @@ func resourceGroupMemberRead(d *schema.ResourceData, m interface{}) error { return err } - - func resourceGroupMemberDelete(d *schema.ResourceData, m interface{}) error { id := d.Id() client := m.(*service.DBApiClient) @@ -98,9 +94,8 @@ func resourceGroupMemberDelete(d *schema.ResourceData, m interface{}) error { return err } - type GroupMemberID struct { - GroupID string + GroupID string MemberID string } @@ -111,16 +106,16 @@ func (g GroupMemberID) String() string { func parseGroupMemberID(id string) *GroupMemberID { parts := strings.Split(id, "|") return &GroupMemberID{ - GroupID:parts[0], - MemberID:parts[1], + GroupID: parts[0], + MemberID: parts[1], } } func iMemberInGroup(member string, group *model.Group) bool { - for _, groupMember := range(group.Members) { + for _, groupMember := range group.Members { if groupMember.Value == member { return true } } return false -} \ No newline at end of file +} diff --git a/databricks/resource_databricks_group_member_aws_test.go b/databricks/resource_databricks_group_member_aws_test.go index b2748a51e..8f1b9e699 100644 --- a/databricks/resource_databricks_group_member_aws_test.go +++ b/databricks/resource_databricks_group_member_aws_test.go @@ -23,7 +23,8 @@ func TestAccAWSGroupMemberResource(t *testing.T) { defer func() { client := testAccProvider.Meta().(*service.DBApiClient) if client != nil && manuallyCreatedGroup != nil { - client.Groups().Delete(manuallyCreatedGroup.ID) + err := client.Groups().Delete(manuallyCreatedGroup.ID) + assert.NoError(t, err, err) } }() @@ -49,12 +50,13 @@ func TestAccAWSGroupMemberResource(t *testing.T) { }, { PreConfig: func() { - // manually create subgroup c + // manually create subgroup c client := testAccProvider.Meta().(*service.DBApiClient) subGroupC, _ := client.Groups().Create("manually-created-group", nil, nil, nil) manuallyCreatedGroup = &subGroupC - // Add new subgroup to current group - client.Groups().Patch(group.ID, []string{manuallyCreatedGroup.ID}, nil, model.GroupMembersPath) + // Add new subgroup to current group + err := client.Groups().Patch(group.ID, []string{manuallyCreatedGroup.ID}, nil, model.GroupMembersPath) + assert.NoError(t, err, err) }, Config: testAWSGroupMemberResource(groupName), // compose a basic test, checking both remote and local values @@ -89,11 +91,12 @@ func TestAccAWSGroupMemberResource(t *testing.T) { // Test behavior to expect to attempt to create new role mapping because role is gone PreConfig: func() { client := testAccProvider.Meta().(*service.DBApiClient) - client.Groups().Delete(group.ID) + err := client.Groups().Delete(group.ID) + assert.NoError(t, err, err) }, - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: 
true, - Destroy: false, + Destroy: false, }, { // use a dynamic configuration with the random name from above @@ -102,7 +105,8 @@ func TestAccAWSGroupMemberResource(t *testing.T) { // Lets delete the manually created group PreConfig: func() { client := testAccProvider.Meta().(*service.DBApiClient) - client.Groups().Delete(manuallyCreatedGroup.ID) + err := client.Groups().Delete(manuallyCreatedGroup.ID) + assert.NoError(t, err, err) manuallyCreatedGroup = nil }, // compose a basic test, checking both remote and local values @@ -138,7 +142,7 @@ func testAWSGroupMemberResourceDestroy(s *terraform.State) error { func testAWSGroupMemberValues(t *testing.T, group *model.Group, displayName string, memberCount int) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, group.DisplayName == displayName) - assert.Equal(t, memberCount,len(group.Members), "member count is not matching") + assert.Equal(t, memberCount, len(group.Members), "member count is not matching") return nil } } diff --git a/databricks/resource_databricks_group_member_azure_test.go b/databricks/resource_databricks_group_member_azure_test.go index ec0d3fded..8b8b355df 100644 --- a/databricks/resource_databricks_group_member_azure_test.go +++ b/databricks/resource_databricks_group_member_azure_test.go @@ -23,7 +23,8 @@ func TestAccAzureGroupMemberResource(t *testing.T) { defer func() { client := testAccProvider.Meta().(*service.DBApiClient) if client != nil && manuallyCreatedGroup != nil { - client.Groups().Delete(manuallyCreatedGroup.ID) + err := client.Groups().Delete(manuallyCreatedGroup.ID) + assert.NoError(t, err, err) } }() @@ -54,7 +55,8 @@ func TestAccAzureGroupMemberResource(t *testing.T) { subGroupC, _ := client.Groups().Create("manually-created-group", nil, nil, nil) manuallyCreatedGroup = &subGroupC // Add new subgroup to current group - client.Groups().Patch(group.ID, []string{manuallyCreatedGroup.ID}, nil, model.GroupMembersPath) + err := client.Groups().Patch(group.ID, []string{manuallyCreatedGroup.ID}, nil, model.GroupMembersPath) + assert.NoError(t, err, err) }, Config: testAzureGroupMemberResource(groupName), // compose a basic test, checking both remote and local values @@ -89,11 +91,12 @@ func TestAccAzureGroupMemberResource(t *testing.T) { // Test behavior to expect to attempt to create new role mapping because role is gone PreConfig: func() { client := testAccProvider.Meta().(*service.DBApiClient) - client.Groups().Delete(group.ID) + err := client.Groups().Delete(group.ID) + assert.NoError(t, err, err) }, - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, { // use a dynamic configuration with the random name from above @@ -102,7 +105,8 @@ func TestAccAzureGroupMemberResource(t *testing.T) { // Lets delete the manually created group PreConfig: func() { client := testAccProvider.Meta().(*service.DBApiClient) - client.Groups().Delete(manuallyCreatedGroup.ID) + err := client.Groups().Delete(manuallyCreatedGroup.ID) + assert.NoError(t, err, err) manuallyCreatedGroup = nil }, // compose a basic test, checking both remote and local values @@ -138,7 +142,7 @@ func testAzureGroupMemberResourceDestroy(s *terraform.State) error { func testAzureGroupMemberValues(t *testing.T, group *model.Group, displayName string, memberCount int) resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, group.DisplayName == displayName) - assert.Equal(t, memberCount,len(group.Members), "member count is not matching") + assert.Equal(t, 
memberCount, len(group.Members), "member count is not matching") return nil } } diff --git a/databricks/resource_databricks_group_role.go b/databricks/resource_databricks_group_role.go index 0cff9f3a3..34931547f 100644 --- a/databricks/resource_databricks_group_role.go +++ b/databricks/resource_databricks_group_role.go @@ -31,14 +31,12 @@ func resourceGroupRole() *schema.Resource { } } - - func resourceGroupRoleCreate(d *schema.ResourceData, m interface{}) error { client := m.(*service.DBApiClient) groupID := d.Get("group_id").(string) instanceProfileID := d.Get("instance_profile_id").(string) groupRoleID := &GroupRoleID{ - GroupID: groupID, + GroupID: groupID, InstanceProfileID: instanceProfileID, } @@ -95,9 +93,8 @@ func resourceGroupRoleDelete(d *schema.ResourceData, m interface{}) error { return err } - type GroupRoleID struct { - GroupID string + GroupID string InstanceProfileID string } @@ -108,16 +105,16 @@ func (g GroupRoleID) String() string { func parseGroupRoleID(id string) *GroupRoleID { parts := strings.Split(id, "|") return &GroupRoleID{ - GroupID:parts[0], - InstanceProfileID:parts[1], + GroupID: parts[0], + InstanceProfileID: parts[1], } } func iRoleInGroup(role string, group *model.Group) bool { - for _, groupRole := range(group.Roles) { + for _, groupRole := range group.Roles { if groupRole.Value == role { return true } } return false -} \ No newline at end of file +} diff --git a/databricks/resource_databricks_group_role_aws_test.go b/databricks/resource_databricks_group_role_aws_test.go index 5e06d73e1..d7c9b9d9d 100644 --- a/databricks/resource_databricks_group_role_aws_test.go +++ b/databricks/resource_databricks_group_role_aws_test.go @@ -38,7 +38,6 @@ func TestAccAWSGroupRoleResource(t *testing.T) { // verify local values resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), resource.TestCheckResourceAttr("databricks_group_role.my_group_role", "instance_profile_id", role), - ), Destroy: false, }, @@ -49,11 +48,12 @@ func TestAccAWSGroupRoleResource(t *testing.T) { // Test behavior to expect to attempt to create new role mapping because role is gone PreConfig: func() { client := testAccProvider.Meta().(*service.DBApiClient) - client.InstanceProfiles().Delete(role) + err := client.InstanceProfiles().Delete(role) + assert.NoError(t, err, err) }, - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, { // use a dynamic configuration with the random name from above @@ -62,11 +62,12 @@ func TestAccAWSGroupRoleResource(t *testing.T) { // Test behavior to expect to attempt to create new role mapping because role is gone PreConfig: func() { client := testAccProvider.Meta().(*service.DBApiClient) - client.Groups().Delete(group.ID) + err := client.Groups().Delete(group.ID) + assert.NoError(t, err, err) }, - PlanOnly: true, + PlanOnly: true, ExpectNonEmptyPlan: true, - Destroy: false, + Destroy: false, }, }, }) diff --git a/databricks/utils.go b/databricks/utils.go index 5e7f67f3c..2fa06ac35 100644 --- a/databricks/utils.go +++ b/databricks/utils.go @@ -105,7 +105,7 @@ func ValidateInstanceProfileARN(val interface{}, key string) (warns []string, er v := val.(string) if v == "" { - return nil, []error{fmt.Errorf("%s is empty got: %s, must be an aws instance profile arn", key, v, )} + return nil, []error{fmt.Errorf("%s is empty got: %s, must be an aws instance profile arn", key, v)} } // Parse and verify instance profiles @@ -116,7 +116,7 @@ func ValidateInstanceProfileARN(val interface{}, key string) 
(warns []string, er // Verify instance profile resource type, Resource gets parsed as instance-profile/ if !strings.HasPrefix(instanceProfileArn.Resource, "instance-profile") { return nil, []error{fmt.Errorf("%s must be an instance profile resource, got: %s in %s", - key, instanceProfileArn.Resource , v)} + key, instanceProfileArn.Resource, v)} } return nil, nil } diff --git a/databricks/utils_test.go b/databricks/utils_test.go index d92a01d47..5ef49ca30 100644 --- a/databricks/utils_test.go +++ b/databricks/utils_test.go @@ -32,8 +32,8 @@ func TestIsClusterMissingFalseWhenErrorNotInCorrectFormat(t *testing.T) { func TestValidateInstanceProfileARN(t *testing.T) { testCases := []struct { - instanceProfileARN string - errorCount int + instanceProfileARN string + errorCount int }{ {"arn:aws:iam::999999999999:instance-profile/my-fake-instance-profile", 0}, {"arn:aws:iam::999999999999:role/not-an-instance-profile", 1}, @@ -46,5 +46,3 @@ func TestValidateInstanceProfileARN(t *testing.T) { assert.Lenf(t, errs, tc.errorCount, "directory '%s' does not generate the expected error count", tc.instanceProfileARN) } } - - From 9fe788e43c55041d521672bbe7266ebc078ecbfb Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 08:16:56 -0400 Subject: [PATCH 06/13] added golanglintci file credits to the hashicorp azurerm provider, makefile is set to lint with the file, also added goimports and gofmt -s for simplifying code, and removed if else chains with switch --- .golangci.yml | 35 ++++++++++++++ Makefile | 23 +++++---- client/model/notebook.go | 4 +- client/service/client.go | 4 +- client/service/clusters.go | 3 +- client/service/clusters_integration_test.go | 6 +-- client/service/clusters_test.go | 6 +-- client/service/commands.go | 9 ++-- client/service/commands_integration_test.go | 3 +- client/service/commands_test.go | 3 +- client/service/dbfs.go | 9 ++-- client/service/dbfs_integration_test.go | 3 +- client/service/groups.go | 9 ++-- client/service/groups_integration_test.go | 4 +- client/service/groups_test.go | 3 +- client/service/instance_pools.go | 3 +- .../instance_pools_integration_test.go | 4 +- client/service/instance_pools_test.go | 3 +- client/service/instance_profiles.go | 4 +- .../instance_profiles_integration_test.go | 3 +- client/service/instance_profiles_test.go | 3 +- client/service/jobs.go | 3 +- client/service/jobs_integration_test.go | 5 +- client/service/jobs_test.go | 3 +- client/service/libraries.go | 3 +- client/service/libraries_integration_test.go | 3 +- client/service/libraries_test.go | 3 +- client/service/main_test.go | 8 ++-- client/service/mask_utils.go | 1 - client/service/mws_credentials.go | 4 +- .../mws_credentials_integration_test.go | 3 +- client/service/mws_customer_managed_keys.go | 3 +- ..._customer_managed_keys_integration_test.go | 4 +- client/service/mws_networks.go | 7 +-- .../service/mws_networks_integration_test.go | 4 +- client/service/mws_storage_configurations.go | 2 +- ...storage_configurations_integration_test.go | 3 +- client/service/mws_workspaces.go | 6 +-- .../mws_workspaces_integration_test.go | 4 +- client/service/notebooks.go | 3 +- client/service/notebooks_integration_test.go | 8 ++-- client/service/notebooks_test.go | 3 +- client/service/secret_acls.go | 3 +- client/service/secret_acls_test.go | 3 +- client/service/secret_scopes.go | 3 +- client/service/secret_scopes_test.go | 4 +- client/service/secrets.go | 3 +- .../secrets_scopes_acls_integration_test.go | 4 +- client/service/secrets_test.go | 4 +- 
client/service/tokens.go | 3 +- client/service/tokens_integration_test.go | 4 +- client/service/tokens_test.go | 3 +- client/service/users.go | 13 +++-- client/service/users_integration_test.go | 7 ++- client/service/users_test.go | 7 +-- databricks/azure_auth.go | 15 +++--- .../data_source_databricks_notebook_paths.go | 1 + databricks/mounts.go | 3 +- databricks/provider.go | 47 +++++++++++-------- databricks/provider_test.go | 5 +- ...source_databricks_azure_blob_mount_test.go | 1 - databricks/resource_databricks_cluster.go | 41 ++++++++-------- databricks/resource_databricks_dbfs_file.go | 5 +- .../resource_databricks_dbfs_file_sync.go | 5 +- .../resource_databricks_instance_pool.go | 9 ++-- .../resource_databricks_instance_profile.go | 5 +- databricks/resource_databricks_job.go | 4 -- .../resource_databricks_job_aws_test.go | 9 ++-- .../resource_databricks_job_azure_test.go | 9 ++-- ...rce_databricks_mws_credentials_mws_test.go | 5 +- .../resource_databricks_mws_networks.go | 7 +-- ...source_databricks_mws_networks_mws_test.go | 5 +- ...e_databricks_mws_storage_configurations.go | 5 +- ...cks_mws_storage_configurations_mws_test.go | 5 +- .../resource_databricks_mws_workspaces.go | 3 +- ...urce_databricks_mws_workspaces_mws_test.go | 7 +-- databricks/resource_databricks_notebook.go | 3 +- databricks/resource_databricks_scim_group.go | 5 +- ...resource_databricks_scim_group_aws_test.go | 3 +- ...source_databricks_scim_group_azure_test.go | 3 +- databricks/resource_databricks_scim_user.go | 8 ++-- .../resource_databricks_scim_user_aws_test.go | 3 +- ...esource_databricks_scim_user_azure_test.go | 7 +-- databricks/resource_databricks_secret_acl.go | 9 ++-- ...resource_databricks_secret_acl_aws_test.go | 3 +- ...source_databricks_secret_acl_azure_test.go | 3 +- .../resource_databricks_secret_aws_test.go | 1 - .../resource_databricks_secret_azure_test.go | 1 - .../resource_databricks_secret_scope.go | 5 +- ...source_databricks_secret_scope_aws_test.go | 3 +- ...urce_databricks_secret_scope_azure_test.go | 3 +- databricks/resource_databricks_token.go | 5 +- .../resource_databricks_token_aws_test.go | 3 +- .../resource_databricks_token_azure_test.go | 3 +- databricks/utils.go | 1 - main.go | 3 +- 96 files changed, 325 insertions(+), 234 deletions(-) create mode 100644 .golangci.yml diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..8fc224c62 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,35 @@ +run: + deadline: 10m10s + modules-download-mode: vendor + +issues: + max-per-linter: 0 + max-same-issues: 0 + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - interfacer + - nakedret + - misspell + - staticcheck + - structcheck + - typecheck + - unused + - unconvert + - varcheck + - vet + - vetshadow + - whitespace + +linters-settings: + errcheck: + ignore: github.com/hashicorp/terraform-plugin-sdk/helper/schema:ForceNew|Set,fmt:.*,io:Close \ No newline at end of file diff --git a/Makefile b/Makefile index 9ae41145b..081b1aea4 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ default: build -test: +test: lint @echo "==> Running tests..." @gotestsum --format short-verbose --raw-command go test -v -json -short -coverprofile=coverage.txt ./... @@ -26,17 +26,22 @@ coverage-int: int int-build: int build -build: lint test fmt +build: lint test @echo "==> Building source code with go build..." 
@go build -mod vendor -v -o terraform-provider-databricks -lint: +lint: fmt @echo "==> Linting source code with golangci-lint..." - @golangci-lint run --skip-dirs-use-default --timeout 5m --build-tags=azure - @golangci-lint run --skip-dirs-use-default --timeout 5m --build-tags=aws + @golangci-lint run --skip-dirs-use-default --timeout 5m -fmt: lint +fmt: @echo "==> Formatting source code with gofmt..." + @goimports -w client + @goimports -w databricks + @goimports -w main.go + @gofmt -s -w client + @gofmt -s -w databricks + @gofmt -s -w main.go @go fmt ./... vendor: @@ -44,17 +49,17 @@ vendor: @go mod vendor # INTEGRATION TESTING WITH AZURE -terraform-acc-azure: fmt +terraform-acc-azure: lint @echo "==> Running Terraform Acceptance Tests for Azure..." @CLOUD_ENV="azure" TF_ACC=1 gotestsum --format short-verbose --raw-command go test -v -json -tags=azure -short -coverprofile=coverage.out ./... # INTEGRATION TESTING WITH AWS -terraform-acc-aws: fmt +terraform-acc-aws: lint @echo "==> Running Terraform Acceptance Tests for AWS..." @CLOUD_ENV="aws" TF_ACC=1 gotestsum --format short-verbose --raw-command go test -v -json -short -coverprofile=coverage.out -run 'TestAccAws' ./... # INTEGRATION TESTING WITH AWS -terraform-acc-mws: fmt +terraform-acc-mws: lint @echo "==> Running Terraform Acceptance Tests for Multiple Workspace APIs on AWS..." @/bin/bash integration-environment-mws/run.sh diff --git a/client/model/notebook.go b/client/model/notebook.go index f6ac24d81..fa078836d 100644 --- a/client/model/notebook.go +++ b/client/model/notebook.go @@ -1,6 +1,6 @@ package model -// Language is a custom type for langauge types in Databricks notebooks +// Language is a custom type for language types in Databricks notebooks type Language string // ObjectType is a custom type for object types in Databricks workspaces @@ -17,7 +17,7 @@ const ( DBC ExportFormat = "DBC" ) -// Different types of langauge formats available on Databricks +// Different types of language formats available on Databricks const ( Scala Language = "SCALA" Python Language = "PYTHON" diff --git a/client/service/client.go b/client/service/client.go index 4fe2d5f53..72979ea81 100644 --- a/client/service/client.go +++ b/client/service/client.go @@ -89,7 +89,7 @@ func (c *DBApiClientConfig) Setup() { retryMaximumDuration := 5 * time.Minute c.client = &retryablehttp.Client{ HTTPClient: &http.Client{ - Timeout: time.Duration(time.Duration(c.TimeoutSeconds) * time.Second), + Timeout: time.Duration(c.TimeoutSeconds) * time.Second, Transport: &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: c.InsecureSkipVerify, @@ -292,7 +292,6 @@ func PerformQuery(config *DBApiClientConfig, method, path string, apiVersion str } requestURL += "?" 
+ params.Encode() auditGetPayload(requestURL, secretsMask) - } else { if marshalJSON { bodyBytes, err := json.Marshal(data) @@ -303,7 +302,6 @@ func PerformQuery(config *DBApiClientConfig, method, path string, apiVersion str requestBody = bodyBytes } else { requestBody = []byte(data.(string)) - } auditNonGetPayload(method, requestURL, data, secretsMask) } diff --git a/client/service/clusters.go b/client/service/clusters.go index d05cfc2ea..1bf889597 100644 --- a/client/service/clusters.go +++ b/client/service/clusters.go @@ -3,10 +3,11 @@ package service import ( "encoding/json" "errors" - "github.com/databrickslabs/databricks-terraform/client/model" "log" "net/http" "time" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // ClustersAPI is a struct that contains the Databricks api client to perform queries diff --git a/client/service/clusters_integration_test.go b/client/service/clusters_integration_test.go index aec197e69..7ae4b3c40 100644 --- a/client/service/clusters_integration_test.go +++ b/client/service/clusters_integration_test.go @@ -1,10 +1,11 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" - "github.com/stretchr/testify/assert" "reflect" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/stretchr/testify/assert" ) func TestListClustersIntegration(t *testing.T) { @@ -72,5 +73,4 @@ func TestListClustersIntegration(t *testing.T) { clusterReadInfo, err = client.Clusters().Get(clusterInfo.ClusterID) assert.NoError(t, err, err) assert.True(t, clusterReadInfo.State == model.ClusterStateRunning) - } diff --git a/client/service/clusters_test.go b/client/service/clusters_test.go index 7b85266af..4be6dedb8 100644 --- a/client/service/clusters_test.go +++ b/client/service/clusters_test.go @@ -1,10 +1,11 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" "time" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestClustersAPI_Create(t *testing.T) { @@ -226,7 +227,6 @@ func TestClustersAPI_Get(t *testing.T) { } func TestClustersAPI_List(t *testing.T) { - tests := []struct { name string response string @@ -302,7 +302,6 @@ func TestClustersAPI_List(t *testing.T) { } func TestClustersAPI_ListZones(t *testing.T) { - tests := []struct { name string response string @@ -351,7 +350,6 @@ func TestClustersAPI_ListZones(t *testing.T) { } func TestClustersAPI_ListNodeTypes(t *testing.T) { - tests := []struct { name string response string diff --git a/client/service/commands.go b/client/service/commands.go index 75330c45e..857bfed89 100644 --- a/client/service/commands.go +++ b/client/service/commands.go @@ -4,10 +4,11 @@ import ( "encoding/json" "errors" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "log" "net/http" "time" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // CommandsAPI exposes the Context & Commands API @@ -16,9 +17,9 @@ type CommandsAPI struct { } // Execute creates a spark context and executes a command and then closes context -func (a CommandsAPI) Execute(clusterID, langauge, commandStr string) (model.Command, error) { +func (a CommandsAPI) Execute(clusterID, language, commandStr string) (model.Command, error) { var resp model.Command - context, err := a.createContext(langauge, clusterID) + context, err := a.createContext(language, clusterID) if err != nil { return resp, err } @@ -26,7 +27,7 @@ func (a CommandsAPI) Execute(clusterID, langauge, commandStr 
string) (model.Comm if err != nil { return resp, err } - commandID, err := a.createCommand(context, clusterID, langauge, commandStr) + commandID, err := a.createCommand(context, clusterID, language, commandStr) if err != nil { return resp, err } diff --git a/client/service/commands_integration_test.go b/client/service/commands_integration_test.go index de6b70fff..daa97f1b4 100644 --- a/client/service/commands_integration_test.go +++ b/client/service/commands_integration_test.go @@ -1,9 +1,10 @@ package service import ( + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/stretchr/testify/assert" - "testing" ) func TestContext(t *testing.T) { diff --git a/client/service/commands_test.go b/client/service/commands_test.go index df9ca22e9..6481e7487 100644 --- a/client/service/commands_test.go +++ b/client/service/commands_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestCommandsAPI_Execute(t *testing.T) { diff --git a/client/service/dbfs.go b/client/service/dbfs.go index 13a7fa7ee..ec8ba2a5b 100644 --- a/client/service/dbfs.go +++ b/client/service/dbfs.go @@ -3,9 +3,10 @@ package service import ( "encoding/base64" "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" "log" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // DBFSAPI exposes the DBFS API @@ -53,7 +54,7 @@ func (a DBFSAPI) Read(path string) (string, error) { } bytesFetched = append(bytesFetched, bytes...) - offSet = offSet + length + offSet += length } resp := base64.StdEncoding.EncodeToString(bytesFetched) return resp, nil @@ -92,7 +93,7 @@ func (a DBFSAPI) Copy(src string, tgt string, client *DBApiClient, overwrite boo return err } - offSet = offSet + length + offSet += length } return err @@ -287,7 +288,7 @@ func split(buf []byte, lim int) [][]byte { chunks = append(chunks, chunk) } if len(buf) > 0 { - chunks = append(chunks, buf[:]) + chunks = append(chunks, buf) } return chunks } diff --git a/client/service/dbfs_integration_test.go b/client/service/dbfs_integration_test.go index 346a3e79c..f0a6240d5 100644 --- a/client/service/dbfs_integration_test.go +++ b/client/service/dbfs_integration_test.go @@ -3,8 +3,9 @@ package service import ( "bytes" "encoding/base64" - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func GenString(times int) []byte { diff --git a/client/service/groups.go b/client/service/groups.go index 4cb747f9c..780e3dc34 100644 --- a/client/service/groups.go +++ b/client/service/groups.go @@ -4,10 +4,11 @@ import ( "encoding/json" "errors" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "log" "net/http" "sort" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // GroupsAPI exposes the scim groups API @@ -77,7 +78,7 @@ func (a GroupsAPI) Read(groupID string) (model.Group, error) { } groups = append(groups, inheritedGroupFull) } - inherited, unInherited, err := a.getInheritedAndNonInheritedRoles(group, groups) + inherited, unInherited := a.getInheritedAndNonInheritedRoles(group, groups) group.InheritedRoles = inherited group.UnInheritedRoles = unInherited @@ -154,7 +155,7 @@ func (a GroupsAPI) Delete(groupID string) error { return err } -func (a GroupsAPI) getInheritedAndNonInheritedRoles(group model.Group, groups []model.Group) (inherited []model.RoleListItem, unInherited 
[]model.RoleListItem, err error) { +func (a GroupsAPI) getInheritedAndNonInheritedRoles(group model.Group, groups []model.Group) (inherited []model.RoleListItem, unInherited []model.RoleListItem) { allRoles := group.Roles var inheritedRoles []model.RoleListItem inheritedRolesKeys := []string{} @@ -175,5 +176,5 @@ func (a GroupsAPI) getInheritedAndNonInheritedRoles(group model.Group, groups [] unInherited = append(unInherited, role) } } - return inherited, unInherited, nil + return inherited, unInherited } diff --git a/client/service/groups_integration_test.go b/client/service/groups_integration_test.go index 29a737584..5e19d2374 100644 --- a/client/service/groups_integration_test.go +++ b/client/service/groups_integration_test.go @@ -2,9 +2,10 @@ package service import ( "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/stretchr/testify/assert" - "testing" ) func TestFmt(t *testing.T) { @@ -110,5 +111,4 @@ func TestReadInheritedRolesFromGroup(t *testing.T) { } return false }(myTestGroupInfo.InheritedRoles, myTestRole)) - } diff --git a/client/service/groups_test.go b/client/service/groups_test.go index ee67ae3c9..e90b26260 100644 --- a/client/service/groups_test.go +++ b/client/service/groups_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestScimGroupAPI_Create(t *testing.T) { diff --git a/client/service/instance_pools.go b/client/service/instance_pools.go index 2f6d9f1cf..6a33930c6 100644 --- a/client/service/instance_pools.go +++ b/client/service/instance_pools.go @@ -2,8 +2,9 @@ package service import ( "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // InstancePoolsAPI exposes the instance pools api diff --git a/client/service/instance_pools_integration_test.go b/client/service/instance_pools_integration_test.go index 9c09c0b05..7da554bd2 100644 --- a/client/service/instance_pools_integration_test.go +++ b/client/service/instance_pools_integration_test.go @@ -1,9 +1,10 @@ package service import ( + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/stretchr/testify/assert" - "testing" ) func TestInstancePools(t *testing.T) { @@ -69,5 +70,4 @@ func TestInstancePools(t *testing.T) { poolReadInfo, err = client.InstancePools().Read(poolInfo.InstancePoolID) assert.NoError(t, err, err) assert.Equal(t, poolReadInfo.MaxCapacity, int32(20)) - } diff --git a/client/service/instance_pools_test.go b/client/service/instance_pools_test.go index 53bc7719b..c15c993cd 100644 --- a/client/service/instance_pools_test.go +++ b/client/service/instance_pools_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) //go:generate easytags $GOFILE diff --git a/client/service/instance_profiles.go b/client/service/instance_profiles.go index e72d59834..daa96d33c 100644 --- a/client/service/instance_profiles.go +++ b/client/service/instance_profiles.go @@ -3,8 +3,9 @@ package service import ( "encoding/json" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // InstanceProfilesAPI exposes the instance profiles api on the AWS 
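groups.go stops returning an error from getInheritedAndNonInheritedRoles because nothing in it can fail, so callers no longer discard a value that is always nil; users.go gets the same treatment further down. The same cleanup on a hypothetical helper:

package service

import "strings"

// splitRoles shows the shape of the change: the old signature returned
// ([]string, error) with the error always nil; returning only the slice is honest.
func splitRoles(csv string) []string {
	if csv == "" {
		return nil
	}
	return strings.Split(csv, ",")
}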
deployment of Databricks @@ -45,7 +46,6 @@ func (a InstanceProfilesAPI) Read(instanceProfileARN string) (string, error) { // List lists all the instance profiles in the workspace func (a InstanceProfilesAPI) List() ([]model.InstanceProfileInfo, error) { - var instanceProfilesArnList struct { InstanceProfiles []model.InstanceProfileInfo `json:"instance_profiles,omitempty" url:"instance_profiles,omitempty"` } diff --git a/client/service/instance_profiles_integration_test.go b/client/service/instance_profiles_integration_test.go index 606c77792..42143fb72 100644 --- a/client/service/instance_profiles_integration_test.go +++ b/client/service/instance_profiles_integration_test.go @@ -1,8 +1,9 @@ package service import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestInstanceProfiles(t *testing.T) { diff --git a/client/service/instance_profiles_test.go b/client/service/instance_profiles_test.go index fa98a4e45..028b4db6f 100644 --- a/client/service/instance_profiles_test.go +++ b/client/service/instance_profiles_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestInstanceProfilesAPI_Create(t *testing.T) { diff --git a/client/service/jobs.go b/client/service/jobs.go index 2391dc5c0..a1605e084 100644 --- a/client/service/jobs.go +++ b/client/service/jobs.go @@ -2,8 +2,9 @@ package service import ( "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // JobsAPI exposes the Jobs API diff --git a/client/service/jobs_integration_test.go b/client/service/jobs_integration_test.go index a50f96886..8fb7d4c00 100644 --- a/client/service/jobs_integration_test.go +++ b/client/service/jobs_integration_test.go @@ -1,9 +1,10 @@ package service import ( + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/stretchr/testify/assert" - "testing" ) func TestJobsCreate(t *testing.T) { @@ -28,7 +29,7 @@ func TestJobsCreate(t *testing.T) { }, Name: "1-sri-test-job", Libraries: []model.Library{ - model.Library{ + { Maven: &model.Maven{ Coordinates: "org.jsoup:jsoup:1.7.2", }, diff --git a/client/service/jobs_test.go b/client/service/jobs_test.go index 3de1e3d77..8c50c90b4 100644 --- a/client/service/jobs_test.go +++ b/client/service/jobs_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestJobsAPI_Create(t *testing.T) { diff --git a/client/service/libraries.go b/client/service/libraries.go index 37d1aeb5e..8be31fd43 100644 --- a/client/service/libraries.go +++ b/client/service/libraries.go @@ -2,8 +2,9 @@ package service import ( "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // LibrariesAPI exposes the Library API diff --git a/client/service/libraries_integration_test.go b/client/service/libraries_integration_test.go index bda701ccc..a900d8d12 100644 --- a/client/service/libraries_integration_test.go +++ b/client/service/libraries_integration_test.go @@ -1,9 +1,10 @@ package service import ( + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/stretchr/testify/assert" - 
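The jobs integration test drops the repeated element type inside the slice literal, a simplification gofmt -s also applies; the users tests below get the same change. In isolation (values are illustrative):

package service

import "github.com/databrickslabs/databricks-terraform/client/model"

// exampleLibraries returns a slice literal without repeating "model.Library"
// on each element; the slice type already states it.
func exampleLibraries() []model.Library {
	return []model.Library{
		{Maven: &model.Maven{Coordinates: "org.jsoup:jsoup:1.7.2"}},
		{Whl: "dbfs:/FileStore/jars/example.whl"}, // path is illustrative only
	}
}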
"testing" ) func TestLibraryCreate(t *testing.T) { diff --git a/client/service/libraries_test.go b/client/service/libraries_test.go index c550115a3..3b12091a6 100644 --- a/client/service/libraries_test.go +++ b/client/service/libraries_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestLibrariesAPI_Create(t *testing.T) { diff --git a/client/service/main_test.go b/client/service/main_test.go index 0a895009c..d2828fa8b 100644 --- a/client/service/main_test.go +++ b/client/service/main_test.go @@ -4,9 +4,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "github.com/joho/godotenv" - "github.com/r3labs/diff" - "github.com/stretchr/testify/assert" "log" "net/http" "net/http/httptest" @@ -14,6 +11,10 @@ import ( "reflect" "strings" "testing" + + "github.com/joho/godotenv" + "github.com/r3labs/diff" + "github.com/stretchr/testify/assert" ) func TestMain(m *testing.M) { @@ -85,7 +86,6 @@ func AssertRequestWithMockServer(t *testing.T, rawPayloadArgs interface{}, reque rw.WriteHeader(responseStatus) _, err := rw.Write([]byte(response)) assert.NoError(t, err, err) - })) // Close the server when test finishes defer server.Close() diff --git a/client/service/mask_utils.go b/client/service/mask_utils.go index 381b3ae9a..babc24076 100644 --- a/client/service/mask_utils.go +++ b/client/service/mask_utils.go @@ -95,7 +95,6 @@ func maskRecursive(copy, original reflect.Value, mask bool) { default: copy.Set(original) } - } // SecretsMask is a struct that contains a list of secret strings to be redacted diff --git a/client/service/mws_credentials.go b/client/service/mws_credentials.go index ed6ffb7c6..027343ad0 100644 --- a/client/service/mws_credentials.go +++ b/client/service/mws_credentials.go @@ -3,8 +3,9 @@ package service import ( "encoding/json" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // MWSCredentialsAPI exposes the mws credentials API @@ -53,7 +54,6 @@ func (a MWSCredentialsAPI) Read(mwsAcctId, credentialsID string) (model.MWSCrede // Delete deletes the credentials object given a credentials id func (a MWSCredentialsAPI) Delete(mwsAcctId, credentialsID string) error { - credentialsAPIPath := fmt.Sprintf("/accounts/%s/credentials/%s", mwsAcctId, credentialsID) _, err := a.Client.performQuery(http.MethodDelete, credentialsAPIPath, "2.0", nil, nil, nil) diff --git a/client/service/mws_credentials_integration_test.go b/client/service/mws_credentials_integration_test.go index a0335e171..09adf32a5 100644 --- a/client/service/mws_credentials_integration_test.go +++ b/client/service/mws_credentials_integration_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/stretchr/testify/assert" "os" "testing" + + "github.com/stretchr/testify/assert" ) func TestMWSCreds(t *testing.T) { diff --git a/client/service/mws_customer_managed_keys.go b/client/service/mws_customer_managed_keys.go index 4fd0ebd5d..1a41346d6 100644 --- a/client/service/mws_customer_managed_keys.go +++ b/client/service/mws_customer_managed_keys.go @@ -4,8 +4,9 @@ import ( "encoding/json" "errors" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // MWSCustomerManagedKeysAPI exposes the mws customerManagedKeys API diff --git 
a/client/service/mws_customer_managed_keys_integration_test.go b/client/service/mws_customer_managed_keys_integration_test.go index 7278da1f5..2a4b52ede 100644 --- a/client/service/mws_customer_managed_keys_integration_test.go +++ b/client/service/mws_customer_managed_keys_integration_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/stretchr/testify/assert" "os" "testing" + + "github.com/stretchr/testify/assert" ) func TestMWSCustomerManagedKeys(t *testing.T) { @@ -15,5 +16,4 @@ func TestMWSCustomerManagedKeys(t *testing.T) { networksList, err := client.MWSCustomerManagedKeys().List(acctId) assert.NoError(t, err, err) t.Log(networksList) - } diff --git a/client/service/mws_networks.go b/client/service/mws_networks.go index 7364774ab..96ae28b65 100644 --- a/client/service/mws_networks.go +++ b/client/service/mws_networks.go @@ -3,8 +3,9 @@ package service import ( "encoding/json" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // MWSNetworksAPI exposes the mws networks API @@ -13,12 +14,12 @@ type MWSNetworksAPI struct { } // Create creates a set of MWS Networks for the BYOVPC -func (a MWSNetworksAPI) Create(mwsAcctId, networkName string, VPCID string, subnetIds []string, securityGroupIds []string) (model.MWSNetwork, error) { +func (a MWSNetworksAPI) Create(mwsAcctId, networkName string, vpcID string, subnetIds []string, securityGroupIds []string) (model.MWSNetwork, error) { var mwsNetwork model.MWSNetwork networksAPIPath := fmt.Sprintf("/accounts/%s/networks", mwsAcctId) mwsNetworksRequest := model.MWSNetwork{ NetworkName: networkName, - VPCID: VPCID, + VPCID: vpcID, SubnetIds: subnetIds, SecurityGroupIds: securityGroupIds, } diff --git a/client/service/mws_networks_integration_test.go b/client/service/mws_networks_integration_test.go index 7beb6d19b..b97e1baa7 100644 --- a/client/service/mws_networks_integration_test.go +++ b/client/service/mws_networks_integration_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/stretchr/testify/assert" "os" "testing" + + "github.com/stretchr/testify/assert" ) func TestMWSNetworks(t *testing.T) { @@ -27,5 +28,4 @@ func TestMWSNetworks(t *testing.T) { myNetworkFull, err := client.MWSNetworks().Read(acctId, myNetwork.NetworkID) assert.NoError(t, err, err) t.Log(myNetworkFull) - } diff --git a/client/service/mws_storage_configurations.go b/client/service/mws_storage_configurations.go index 06b7d4b8b..39a7f41fc 100644 --- a/client/service/mws_storage_configurations.go +++ b/client/service/mws_storage_configurations.go @@ -3,6 +3,7 @@ package service import ( "encoding/json" "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" "net/http" @@ -52,7 +53,6 @@ func (a MWSStorageConfigurationsAPI) Read(mwsAcctId, storageConfigurationID stri // Delete deletes the configuration for the root s3 bucket func (a MWSStorageConfigurationsAPI) Delete(mwsAcctId, storageConfigurationID string) error { - storageConfigurationAPIPath := fmt.Sprintf("/accounts/%s/storage-configurations/%s", mwsAcctId, storageConfigurationID) _, err := a.Client.performQuery(http.MethodDelete, storageConfigurationAPIPath, "2.0", nil, nil, nil) diff --git a/client/service/mws_storage_configurations_integration_test.go b/client/service/mws_storage_configurations_integration_test.go index db9bc1434..9b24f6e8d 100644 --- a/client/service/mws_storage_configurations_integration_test.go +++ b/client/service/mws_storage_configurations_integration_test.go @@ 
-1,9 +1,10 @@ package service import ( - "github.com/stretchr/testify/assert" "os" "testing" + + "github.com/stretchr/testify/assert" ) func TestMWSStorageConfigurations(t *testing.T) { diff --git a/client/service/mws_workspaces.go b/client/service/mws_workspaces.go index a56fb6374..d2761ba4d 100644 --- a/client/service/mws_workspaces.go +++ b/client/service/mws_workspaces.go @@ -4,11 +4,12 @@ import ( "encoding/json" "errors" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "log" "net/http" "reflect" "time" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // MWSWorkspacesAPI exposes the mws workspaces API @@ -61,7 +62,7 @@ func (a MWSWorkspacesAPI) WaitForWorkspaceRunning(mwsAcctId string, workspaceID errChan <- nil } else if model.ContainsWorkspaceState(model.WorkspaceStatusesNonRunnable, workspace.WorkspaceStatus) { errChan <- errors.New("Workspace is in a non runnable state will not be able to transition to running, needs " + - "to be created again. Current state: " + string(workspace.WorkspaceStatus)) + "to be created again. Current state: " + workspace.WorkspaceStatus) } log.Println("Waiting for workspace to go to running, current state is: " + workspace.WorkspaceStatus) time.Sleep(sleepDurationSeconds * time.Second) @@ -116,7 +117,6 @@ func (a MWSWorkspacesAPI) Read(mwsAcctId string, workspaceID int64) (model.MWSWo // Delete will delete the configuration for the workspace given a workspace id and will not block. A follow up email // will be sent when the workspace is fully deleted. func (a MWSWorkspacesAPI) Delete(mwsAcctId string, workspaceID int64) error { - workspacesAPIPath := fmt.Sprintf("/accounts/%s/workspaces/%d", mwsAcctId, workspaceID) _, err := a.Client.performQuery(http.MethodDelete, workspacesAPIPath, "2.0", nil, nil, nil) diff --git a/client/service/mws_workspaces_integration_test.go b/client/service/mws_workspaces_integration_test.go index 071097efd..517933a1c 100644 --- a/client/service/mws_workspaces_integration_test.go +++ b/client/service/mws_workspaces_integration_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/stretchr/testify/assert" "os" "testing" + + "github.com/stretchr/testify/assert" ) func TestMWSWorkspace(t *testing.T) { @@ -15,5 +16,4 @@ func TestMWSWorkspace(t *testing.T) { workspaceList, err := client.MWSWorkspaces().List(acctId) assert.NoError(t, err, err) t.Log(workspaceList) - } diff --git a/client/service/notebooks.go b/client/service/notebooks.go index 9bf018ad1..239124462 100644 --- a/client/service/notebooks.go +++ b/client/service/notebooks.go @@ -2,8 +2,9 @@ package service import ( "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // NotebooksAPI exposes the Notebooks API diff --git a/client/service/notebooks_integration_test.go b/client/service/notebooks_integration_test.go index 6e347ed86..dcaeb3383 100644 --- a/client/service/notebooks_integration_test.go +++ b/client/service/notebooks_integration_test.go @@ -6,14 +6,15 @@ import ( "bytes" "encoding/base64" "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" - "github.com/stretchr/testify/assert" "hash/crc32" "io" "net/url" "sort" "strconv" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/stretchr/testify/assert" ) const fileContent string = 
"UEsDBBQACAgIAMALg1AAAAAAAAAAAAAAAAAKAAQAQ0RDLnB5dGhvbv7KAADtfYt32zbW57/CdbOzdsdQ8CIBpE3Puokz49k8+sVuZ/eLczwgAdqa6OGR5KRpkv99L/gSJYsSSctOk9DnNBVJ4OJ1cfG7FxcXH3be2sm0Px7tPNh5Pp7ZcDx+8xvZ2d8ZT/rnR2bnARU+CziVQkklpMD7OyM9tJD60eNHkGygR+dX+ty9uHw/uwA6+zvReDjUIzPdefDqQ4n8o/T1Oupkf+f8qg/vd2yoGLXSR1orH3FuKFKaBsgXjDMlYutTBXSmV+Hs/aUrPSt0XvxJ+l5fzcbw8nI87c+SepAeLtLA9/85vTgdDd+Y/sRDl959E8bT+9NJ/76xw/H9yET3p3p4ObD3J/rdnPZveaMIgzrM9MyVFPdH/emFdVWY2OnVYAYd8GEnq97FbDiAD0bPNDz9aPpvvWigp9OHpzt6BLSuZqc7P/14H97/BMn05PxqaEcJhU/waIw1/+ybc5u/mUDt3pbevXqd0p7a2dEoHidvIJmdTMaT4yuo8+T9zoPR1WCQvcsf3o0nb+LB+F1GApoymZ303fgSX/pSBIGvqGBJRw/7y5+kYvApbfdyLuYz112Dgb6cQp88iPVgavd3wv7I9EfnWTP6o8ur2ULDTH96OdDvs8Gb6XBgoT/e9c3sYj6YF7Z/fjGbP//+aDy4Go6meaveLz1f9t+OZ6veHZyfT+y5ThkjfX81tY/GMCLTGfQ/5BlPpkXlo6vpbDz8ZTCevbh0ebI6O6aAxCcXE6tN2pHZq+lv/WnfNSGncKkn8PrvfTvRk+jifTZw/Tg+Gk3tpBhJePHYDuzMZi/OB+NQD37Tk6zEAXDcdPYrZIFOAG7tzfpv3vQn1pj3/9uxQTjpR2+mPajFTjmxm3FpK/M50p+5yu24mXQxfvdo4W1W5Yu+sdmHR2Oz8v3LhN2LL/3p03H0xpqj0eHvevisnKn/SyIkntmZTqdCWp3pDLpueOwmUs4HKTP/MrFv+/bdcxA5J+NHQ3NknunLNMUoFRQqVqGxGCNtlEY8wj6SzGIUBRQzEpqAErvzab+hIBKFIApCRiJuQiQjGSD4waCk0EcRF5EVKoiiSLcTRHSFIDodRXrm/fijd/jiiffTenF0P2VHOzkjvWj69nTUNx/zV67DPoLYAEE0/Wjj2Eaz/lv7GPr3dIQ//gJdMR55JE8BvyimGGGGCPGwfIDVAxKcjkiekhYp6eqUNE/JipRsdUqWp+RFSr46Jc9T+kVKf3VK6KzT0Qrp7Ptfv3RmSSNXS2fOq6Qz56qTzp10vnXpzBjIZks4CmgcIi4ji5QfRSgisZUURzyK4ubSWc6lMzFWMx4hKYAq1xrWgVBHCKR2YLWmhLGWMJFtTTrTdtJ5ZN+VJHT6VJJ+BF+T0i4NXchBV+fw8xxBkTpYnbJKsjL69UtW7pNqySqrJKuPg06ydpL11iWrHwoZqNggozlGnOEAhT7AVOXHkigbRhEPmktWVUhWQbANuZKIKIC8nPkWhSHlyJgoJjKUnCczvoVk5VuTrKy9ZL0uXa9LWJJLQZHnlEUOuTqlzFOqIqVanbJKspLg65esPpaVktUJ3QrJ6neYtZOsty9ZRUyp4tZHCpAmYEqYvDLSAfJDLHyChTCiuWSVuJCszEaESa2RjTBFnIBk1UxbxDDMC42psYK1k6z+omSN+5Pp7AxEpPfQmwKfvOk5duqNE07bPQWm18ZOTnf2T3dmkyt7urM3/2bsoA/TM/v8ceFbfxTbyXF0YYd6ITOU5HKC6H5Q12axs3c6yiboblHdvVWAs66hNZ/gKTO8egU9kps7dhK5lxo84PdcLJ/kpoQexviv8B/eeb3/ihQ5aSknrZGTFjlZKSerkZMVOXkpJ6+Rkxc5/VJOf2PO1y3Xhmky/omJP9sQ6LsByUbBMcnMnjv2gZfDYnLufPiUTJ0sS3nZLmeG2QsifX3erIlNsy2AgnLmGSw3wGHDy1X5ocHQAxO3sBWSR5+fP06Z7HXycJz3SPr4YkX6Yzvp2+lTN7NeWh1dlJYx+HqYrqOueHjKJrmb4OXlIRVv/ek/YKjzEt0E3E8mXio3f9FubUtTLq3hRT8Usy3rg+PZJNnDSQXF9D+DnssZTyB1zzXzySQdo3zcPwAFO8g2eea9lcjREj+4SiQzMqtk1t0ZdyQDszLzEmdUkEkHvJrKnEdaEljmlgoyBe8knDKnfRXNgHYqk44MTLE+dFoGlT4li7pDEI+Stj5OxWA2UB8+bQtsBYRWg61KNVY5Db8DWx3YumWwRcNARaCwIt9ohTgHnKUiHiFrhIi1ML6gUXOwNd9HjjUoxX6gUBjwAHEJOCvUViAcQKoACxoa2w5sBYtg6zsvnoyHXlmAOopTrz+8HE9m3veno++8VHgCHDtOZIMjvfvKffCyN0+cTHUQywCy8p6OR8kysLu3752AvNnbX5U2F5Uux3EizzbnyQRjkyyL2jRkPMmF3kLe1y7vntPoixXm7F1/dnGWVvOs6IIvCZGuaMEqkCrq7jd1ILUDqR1IbQBSV0zADrd+3bhVseqN7aDCSBhgSkWHWzvceuu4FQcEh9InKKQBRTyMCJI4EMio2IZaRczSsDlupQVujXAUhAGVSGuGEddhgHRIIoTDmNjQGhrysB1uFUvbL0PjwNp33iPoi5n1gAFm2jtxM8BzQtc7fvTYc7Q84u3+AzjRu7o0LiFk9yY2HoCc8abjq0lkvel7YN/hKmRExHpkVJIR7eQGrpYV/qKswJ186OTDrcuHMIqkHwqLqHJ6J6EGhYTFCGvqB5xZXwH3NZYPvJAPkpKYCkFA4BDnWMM4Ci2XiFuso5Ay4uuWmwhqaRPBabXGCYVewvZlhRY0JRuDUmvOXCFkN5k3Zybe9wZjnb7c9+zIZTsDTHRuHzoVcd9LCJ05qPLw1PHJmdPLeo4OOStrs2m6SwCKD9dpcUnG+/OMew9OR57Xj+e18B4+9E53nvz69OnpTvLR80x4NesPpr142psMd+clZWpsmihvUO/dBDTP3hB4BvRJB7KTF07FjMeToZ6l+ulML2iljtxiM/Z6U/3WHkwT8bo774akOAusmFUuEcJJIkffIeXdBN8u0PJOT9PUPT3o6ykUGAJvniUpoB7zz0nX76YPpTbluVJ5PoUs+3maMqle34C2niXqOcNEifS7Czt6pmdOSfg1SXEwGOwufX8+nmVJUmmylMT+bqOrmd3NunzF4kEp+dr3xB1wVZVLmKjw44RczhGpW8665eyWlzPJYXYI6qNAUlhutNJImshHRGFfY8p06LPmy5k/h7uG0IjYAMGyxhCngHTDILSwZvosCiizoWwJdwleXM/mq9U640JpBXuYLxyrcK38BgSTX42tRcX+UcC4G9xOMHWC6ZYFU+BODWrfQWAnOJjVSBFiECEWR9LiWDLVXDAFhWAyKgLgKTSKmYgRjwIOerivUAzKeOxHUgvVVjBdO4j4n8Hp6Pjw6eGjEweun7x88cyrAMerHGTqemR3ew/d3kO391
C593An1m23PorKVVVWWbc5od3hgm5VvQO4TxTxMXH4XvuIx9hZt41ENoq1pDYIQVttvqrOD9XKiMZc+BxF0o8QVxTUiZjGSBCqtRFhgElLrwyyfKo2N28/cWjfOxpFIKuBWxJ8v2oL/7PZqaXs7NTdTL/zfSzKaIwBNYdGAH7mFqOQ8xAF2jBJCNGRbeHsPj+gycIYNHrnf8UNAZCOBZIq1ogb7muusfQBY7Wb6UsnNKc2Go/MF+RvRK/5G61tQptttRqgPzn2VALE+XMJFOfHQSvAf3LQdIkCrUHBLygEpdzBxpwdIP+mAHnRD2tnR+cN9DV7AwU+Caq2RxR2J65X6ku+TzsU1aGouwhz4TOBrUJYMkA5lkRIhdggwbCvfGOUMS282OeHsZXPKYtViJg7KMiZClDIWITgiXIMix+TbVEUr9oeWStuF/dHjp4/enn47PD5yUHFNgn/grZJdn6ExWO0UC4CpkYxLNM/OR5IFr0f77tUPz3wnIj1/vIdUz/8G9a35IfXBxA0nnnGQmOTlmbirrJFa0pE2/vL6nw6atJCb9PfyQR0hFBHb7zd4RhU7ImNnH4d6cHAgxJmeyvLA5FnR2mJfxnMfsgYBi3PgL+cz37IK9IfeSsIRe91ic5wbK4GtpRrU+GuX1xyj2QZ1vWNl6UpJsmKxOHgyqapd/PU6ybSWgr7edOLybY2+cPqJswTAYfyH0oTNnmuk3Gv6NEbDChvMqDzbq5Rvc3OR2W/owVHms2s4oAerI1QkpcWOx4Yxw/ujwTlmZLXYm1de1mOxImoVkrneVSL2TaPfeG91Hjka9U09YfaUl0Tn6rbqWeqy2+pno6NalezmNVzFtxes0r+ZbXaNp8SNSd/uzki5nNkvexwTnB12vugvnxPxbusI9/dX3W6Etl+nDdofTqAAYW0m2Yi/2y6qbfzwaklTAp2SnTeWkziVISvajI7id+A3bcz5fbqsQCsgnnC8mp0U+5eO93UInhLCsy5zw4vZ++BqVZV+zqWaMBVUXKAoLCy3JxrXtVJ9LqRyK0Buh42mXyF2N1M94YilOIaY/rnRR368tKOTAc5tgY5kvFPzanNkcdm8s4w+AX0wjcEvO5K61rCKXVUr+kkShSv2bkTmDcUdB7Na9qWAKu3MBtbgLgW2Cxtc01+O9+4kNTitQagN0W9vB7oLTUp74bZpCY+cFavWhPEXA0vp9vq2gbrbp7UVfQ3Da9vLnoawcO2XOzP13vHP3+GganBx9/AwJTsPXWVxCYz6388LIuNea6akqGZdbeO/bqwt9fZp/Nl9T4dq/JrVLzbp+v26e5gny7iQkgeKBTpMEZcS4FCzUMUSWFNYAWnhjbep1Pz0J48xCSWRqIQB9IFZWYI2D5AjHLOw4gHPGq7T0d6/vXTAqejx4fHj14e/Xzo/f3o+OTFy//X4MQAwe2dh1IPHY4wO8HkAREPOJl76OzvEOwL6CDsKyYI9Z17/ibG++fLo5PDHefBkLDAzovcTL2TTIRZ0g8/u/25V69hiBJWeLWzfDvVDvA/IVgiIn2GfeTUGOrvpMl3/unoOY8WPej/kTYqZTX3eYt+RTn3lHx1BuNNbj5z74u6Lj7zvFfpfG3oWORytfFjGl+CRJotNbBRxl+0sxDNoJvmJD6cJr/cAe2dob5M/Ore2ES4J+/yAuD1W4cWKj+ARJ7p/mj6HAb1NHVx+bSuVv8eh6trkbq+JIRTL6FTN8inSb4kAeQ8MqkD4IrK5F42WR3gTV6F09QzZonU8+T3NohNrkZbrNeLdyPHXduhB9nOz+0kHbyWBF+vHc5RdotdyzHNs9+gwevrFw1AJLeZrg4w/NZGsvSn40Ey7Z7at3bQtNj+9OcB4LCDxIBXzgy9NLB6VHPKwyoO4v7zzfevxh/ybhzuOFkTj0D6VQ53JOjCb3VA/g6OLegg0Bq0WipDd/bX3c9nqUY+N0rFAZfKb37rn5qHjZU41IGmFMU6BCCvFEMyJhqJSGsJZXIhg5ZA3t/usd+WGP7m7vs3P0LQ9gBA+7O8n+PkcXdc4Zs6rtB0ec7lkjd90weIVfIWrb1WK06rQrwrzCpiBwlMOenW6m6tvv3DxDGJhQAR6UfEOccLiiSJCLKBpYH1KaMRab5Wz0NlShngAGOOQh4LxDlRSHGuUUAEZjZUNPDbrtXBqrX6Rka3tlE6yCqbm7ihze3Z4cu/JTa3S0jQj9Jq7ZZCv/2rb/5VCv7mHveaG9/wJsvbfmdS7EyKnUmxMyl2JsXOpNiZFL8BncVpH6xaZ6kIKygIDjqdpdNZbl9n4VpxHAY+sqELrx2pACmJDeJK+3GgWEAi0VxnYYXOggk2hlCB4sjZF2NQh1RoCJKQlAmhuS/8ljrLcnz/TGdZsjBWqSzeb4cvj49ePPcOjr0XTxIz3jW/gbqneTubY2dz7GyOX+f6DStx9frNadX6Hbh7fLv1u1u/b3v9jmNsaWQRD2MK66vmSGMWIWmoCKxvWRDh5uv3/PoNLUTAtJKIxZGPODUYacotCnzLA1jaleUtr98gcnH9zmODle6rGEzXBhm7dh3GyshhbQ2RtQu+f5YcpTobjM/vQ/6FJwxrW31CzlaH3MKGAX8Jq2PfopBS36ExhkIpNNKGklD5IfMZRhGk7E1H+vLyfQ/y/ufKzjKL342p+L7ftupW6pAYEyFKVIS4NQrJICQo5tRanzlYGW2uensqhErZqu6cIROYKMQwk4hJblEFOKyZb5DgOI6pT7gfhxvqfiMqhAnaqu4+QQYTn0hjEAndDbMRkQjGWqJAah4ZLGOY4RvqfiMqhOF27B4ECCRJ5BgSWWxB+dCCItBHKOgKPiExpowTsaHuN6LStu4EQxGcKRtIjrAP/3AiLVIKfmmfhpFQIVbxhrrfjErruguOAhYK6LMQsciFljcg6JURbtIxbCJlWKg28MzNqLSuuyIgIWQQKEkR1gw4FYAxDHQES18UchUbYgkjG+p+IyowV8kWFRB3SrOpOjBqoXlM+38s5KmygXZKww2UhqDyTk9QGiqi+AmKVRf1vFMabl9pMFyogJMQSR/AIMew1rjFHZB9yKU0AvC+31xpmF9yBGCTRhwrZGkA6zBx+kjICeLMh6Uutob7tKXSoFYb/V78cnL07Oi/Dxs4KLSNKJzunpN9Af9QGuwX/0DNSPLDWd98P8Fy8JwEHhT7kitApSTdTYd14/MuHMN1O0qb9/muhk/6Azs9cDVe3OpLV5PFjb6Uj9dubeYUX6Yt3grNeGUVGzV02B9trsrmjduh/n0bZPTb80UyZnyV5G3cNbPxTA+SDt9KTyfkjgFXtKX2+lPTvogruKUb3q9ieAuvpalDDUMoezsy4Y8XE2MnbtGctuYaSADLxvn77fmcJJDu0VVor41ZU7G8lU6a3ulQJ41/Mbuwk2+29c+vhm70tzNbk3hJ5lvsy7TlN+3M5uVCb/2c3Pt7gzI7/Xub+jdo0rJS//YronNI0Io6p5tO/759/TsklmLBLTLcJIf6IhSGoCRjqyIVaxXFQQv9e36XZ6RsHGFBkAisRDygE
dIRwygUVCpODAe1vJ3+TfF2N+1Ke2cV+3dtz/212r7DK/560SQCutWfpGpkwd5UnItOVFVe+o3wQGyxSFLdQnILLSRrWph/Y0oGWyySVreQ3kIL6ZoW5t+IorLbPOjAy3XwIn3i4odVgBdRccpR+kx2mwcdeLl98OJz4QOzSWRE4nYUUhT6kUaEsogSpSMax83By/zKVB4TbhhjSLHAOnCkkTImRjYwinCuGBctTznSlReR3+yUY1t4QlecA/TZDc8B5rsgyycdX72Gj6npKz8FCILBqaxHiY+2E6Cus6ANjmuaHxEEeXU8ArlzMZ4d5Qdmlg4+duc6u3Od3bnO7lxnd66zO9d5/SRFd66zO9f5hWlpgfPjqtDSZMW5EBn4uDsX0mlpt6+laROH3ILuZAnjiPMYtDSsKaKGxkwIIiQJm2tp8+vusSGYi9CiKNAS6EfOy1oJFFkpiKQCc9vyXAilS1ra0Dgl7Tvv+NFj77ejA++Z2+bzjv/rqZfPYnfbpMcmxnNeIaejFXraplMgaX/fIJ4krpYFS+6eXQD4bv7f/vy3hMQsNAHytVKIhxojpWG6hpFPmIBZammLWFSli5ol4b4FqaJEBPM/sgZJAcUFxIqIaBbzoK2VBi8GgM+3mKbJTfeOlxITertNpmpzfdX2U9vjY5kyvkrxzgZ9hcVktfI+N+u8yrI6e0JixIF/nFNpsfHNAocvGpsaXudmjKTWadXTouZb6tLF3tvG2bLXDcq5yaGeBuXc6ABOk3JuclimSb/d5CBXk/bc5ABNk/bc5FCdK2eZOoN0C9wsfBUxEnJkgRJwWcCQdr0FrVCWYk7jqOr4lfP/dprb6Gr40kbjSaI/B/uJe2ly3c80db/qg/YMq//pQtyARHvLgyYk+lsWGSB1Cc1DKCRfFg7/J98rgiP89+mOc2Ia6t+vle+vKT8JTlFZh/nXtfUoh5TI6uHG99H4ajRb3w94oWC8oiD86VMmqrZtnYR8LW0jTpI+utCj8yXbTWEJaOzFNgac42zZsCI4xLgN12K3PbwdW1U9j74adJZdcLdpTYI1vJ/YBlqOaWGO2pJ7b9latCUuWbRfbaeeuZmvvdvoNuyETWtdWNO20wkr7PGt+8Nh2y2Nd7Zbt502Frtl2yGX7yze4aCX7L7bkEalLaWbE0s3nrbTtfl21C1JynRx/LOsfMbp9tmqt7XRuNHS1/llb9loztbcmigqgiE6UzvvjGad0ez2jeYqsIkS6DMMSiANCVKYWsRMRAjTsfR92dRopvD81kQRxYS62O3YUA2qsgvlzmKLrMA0wmEARYh2RjOiVhvNWvtlV5jD/C6Y0t0GU2pvl9iedaMLBNUFguoCQXWBoLpAUF0gqE7haaTwcOFiaVYoPAGuUnhg5eoUnk7huf1AUFSDpkMUYtrGgE8AsigCvxjDzMcCB8pGzRWe+e2SxgLS4TJGmDELMpVQpDHIVOwDtCCxBpWq5UFUUHiEv9JN6ClMB28wHr/x9MybXVj4fd5ck7lFh6CAdw5B3VS/66lOIqKowABhYnfRg7UCSelrQDSG8iAAUKxb2DZKl9NFRhgCE1xZLECUEII0CSWKA59DwQSHuK1DIFt9bCs5/OMdPT95UXnHg5564eno1+Oj53/LzwClB4Pcl6vT0Xjkhb2+cSeE4H+no3/+/fC59+zg5NHfDx97J+7hdOR5v/7y+ODk0Ds+TG6VSNI8f3FSpHNJkrRHz48PX0KaVf6HLQ0nN0aVHXhrD94CWQ3eCK6wVsMX7ncSvZPod3AQN9DJJaMs1BQkrrYoNAqDwgozVMWxFFQ1l+jzq3sYFppRzpELGoJ46Mcg2w1Hiru7wSPJDSatXbwXsNvsoj8xZ9H07Znz5M7E91kqw0A2lzw/xwn37Z7CRNDGxfx3G3sgRE539ubfjB04QZV9/rjwrT+K7SQVPwuZofC15vGJflfYHs6YSw65Tke5nX1dC1bZ0YOWdvTFe4FW3Q206n4gsuLGHVFQkiUKskZOWeRUpZxqY87tWUi6u3q+gAV7foh3zdzIuuV45mpz+T6d6YDveo5Y7Bxweq7lTybpsOWs8CFzSUiKmXdgIhxLLJJv7+f1zkYgY5hkrFZmXmKWCjIpD1RTmbNNSwLLDFRBZn5I2jHPnPZVNAPaqQg5MjDr+tBpGf751BhO/QhjM/KigZ5OH57uaMAJCFZdFMMk+Mn1U8JSP953qX564LkWeH/5jqkf0iFLfnp9EFHjmWcsyLwyItv50fTfLtAeX82A7roy0fb+slqfjpq00dv0dzLRkQ119MbbHY6nM29iIxgDL9KDgQclzPZWlgeozI7SEv8ymP2QrRtoeZH+y/nsh7wi/ZG3glD0XpfoDMfmamBLuTYV7vrFJfdIlmFd33hZmrXr+AoC4eDKphQe5o1JJMDapL0sqcMDtRKma//apLvVjZwnAh7mP6S4I/lZJ89+bcJuStcmu1cn0a20v8BWt9AFH2+n/Sn/1Wp/jaSNioapsK1+b4BMG3fjGmHg1kpQ0CC9l2YeD4yb8ckfzWdtBoNrtXWdkKhZ22aSut5q9ON9WIN+2qlnGPBx9fUequJ6DxlI3BkGOsPA7RsGFOc4NDpGYSidqVdIJOOIoTDmwkTEBETT5oaB+Z2AnApLrdvVoTJAnIQC6ZhrpKnbQ8dxGLOotWFA1LYM9CLooZl9MXlpYX5E9sQOL3+DngE9fsHWC4r6Cg1c1dTAL2bDQaGAVwLVXHi0UnCXLZ23Y8vElRGRCa7aiA4kFZ3I6kTW7R9X15GiJCBIUB9ESsR9pGLfXRMeEgkCi4ZR43AVCs9vJJIqNiFWPgqFo08tR6EVHCkdy4hgqoyWrUWWXNqJTvanlq4hX5BInUGwMwh+ewbBz7KDxyr9MIjb6qhY9bp7ALpV7w5WPUyp1n7EEZMsdC7WAklMYOkzgWKERlaZxpd3Kzy/ByDwbUiB0ZE2EXVu0IDRsaVIYUKE5lIK3PK8CeWrfTKWVr36gXT5Z1//yAIlukSFLlHAKyj4BYWglDuokfPu1979V7zI6Zdy+os5y1ElipysyMlLOXmNnLTIyUo52cacHVLokMKtIgXntVOtH5OKcI6KYtYF3e+Qwu0jhQibyEYiRNjoCHFfWaTdySALizi2JvB5G6RQCrofYk3coVdhfYE4VwASGAAH5gsdRVILRlve2Ev9rQfdr2u6W8YK7FqMdrfS3E4I+iLyfNuY83RzzPnuDoHuDoHuDoHuDoHuDoHuDoHuDoHuDoHuDoEvTOkE9bEyHBJxR4ZWK51SdOGQOqXz9pVOToSPIxIgGwU+4tJiJOMgRNIKpXxmreCmudI5v0MgpBJ0TK4QIEyCeOiiidvYRziWkRTCBCqOWyqdwZo7BFwmj6aXBjy6mjiu8dzUAeg/nnhuRoNA9hzY92J4MUlj+q7QQzcd6ru9M8SEqO4McScQ7logBL5PLdMUCRy6Sz+MQmFkGRLK15HAUQDaa3OBML9UgAgXnUdgFOiAI44DEAixYohhxhTHfqxM2/0qsSgQ4sl46CWxxXoJ30+9/vByPJm5
navkW/ksiitsbYL4ahQlPFpKdDoyNvacLctlp7vJ9Dsz8b43GOv05b6XlH3mIN3D050FAxgtDGAAyLJ0LjDMw00x3Oj9eca9B+5scj+eF+k9fOid7jz59elTB/lGzom3FCJuMtydl7TvnQB42ksT5bXvOamZzu5dF8B7MjnrJzGNvUF/tptk2FtMA7L0zKShZpM0z8cju9eL9BSS50q4G7Xdvb097/Q0La6XWCh6zmQBJMa5ycId1AORPNSz9JSfUwxKZ/uyUJ+l3trrTfVbezA9cW92572dtMrCLMn6IP13ZN9lAdxPxgfGnQrPm11UDKqmB309nfsYQg3KX/897o+yqypmy4UWWaeTqHBN3PfSiOgLVN5d2IktJ+zlXQ2VcpjWO3j+uLBIuQPsD7152oTcyib1nOTZ3Ss3Ggbg3JpfU1JAfTevxFLWom5TEJbR7PD3ywlU8DlwkjtLP3R38vwf60IAe0XX9L53zfK877yXsLoliykpyFyN3KAVfLVAFRp0jaajtbdAi/ZSYnsrGrKiodCeM1gjz0AnunSrIrT1Qwr4Qff58Kn34dPpzpy7itHd99JEe0mZ6W935Ga5Z9Mv00+rC3tVniyvoehiulSmn08clz7Vjk93yi167GZAwtqu3k61STlvYQbM51TOfXPTZpnrekl3F6O/2JvXeX4/T1iml4VSyDLNx688sS/s6JmzGufEixKjMaAVN5OT1paIrmL9pTIfLr5wOmzeDjtzA73xtFbyB/gQkkcZJpyNvWQlzSGh24t1L6fjq0lk/9fUK3ZpE7DYy4t0fTIf7AfJUD9xhOad5pLMx/dBacosXXmQZ8jYai/pv+fjWdaFKZIpujBR2x1jL7NTlns+DPZ3G11B56/yjCayrjPG1+Eb7RTqyqv8kkhcq9VwRbrjHB3qvoMgXUYEOjTUxbY0LogoAzU5jBAPYhb43GoiRGPUTUpRiePYBASUe+tLoB8Yi0LLAOQHyo+oHzOMdUvULRdR9xwNx/3JdFZxrqOEkB/mWPUmsYi/FimliKiUUqzKQ8VXXez0TkrdgZRiJjKEsRipSFrEFQuQUpijyHCfxiEH1b35CQ4yDyVIpZVMUoGM9AniAiugLwjCLMA01lqLuO2hM9Xcl5Wu809pGyR97sta9mFd8l297h25n2xyzF0kMhplD1baiEZLL80FGi19RBdotPRQLdHo/Eab+Tvkakvj7chcmaldZrdt2B4JBMSFWK1CAqoCCQQgWTsk0CGB2z9+rkJfYCKRsQQj7nOOJIsDhLUwLAwDHbXwVSXzSKNWxVqp2EcaPiBuaYR0xBmSfmhtzABw6JbbhgxXbBs+ceqKdzSKYLEAbtED7ymoKKtsJ/izbQly1m0JdpP9zn0EqA0YtgzFkaVusgdIBswgjCMiQtAAJG0ea4LMg1AKoUmosY9Con2A/bEAYcJ9JKOYUe3D5NdtwwrLxSCU5U2bNZtxKy2mG2Z9M1PEi6vZKxL4rx94Rei7V33zwAPQue+VQeUDL8WE+14GF+cvFoDgA6+AYvteDvEeeBmw2/dy8FZK9/rPbw/xFa+22rKKIDwqwO4S+04wdoLxlgUjBbgTSqWQ9WmIOPE1UhE1KDYC4EsosDbNo/OSeUQLQYQNokgiy4Eq575AoSQxqMS+iXHALQ5bnthhZMlqa93W3BcUnpdeC8+7tgmrJPp2jiMvH0VePoa86ijw5zuG3BlLvinDxfweqHWTY6cL0PtlBOhtpEWWjULVIMqvAlECiy5ASgei7gBEaawCP+YolgGAKBEapH0ZoDiMjVHCj0Kjm4OoeYCUKIy5CglGgVUEcevuKySCIBXHOoyYFjFteT8Vo1Vb32vF7eLe99HzRy8Pnx0+Pzm4iy3wvyKE/lqOi73wsPS04t1fV79efjf/cTr66PXNx7Iw/5g6MmWSOXtaELPLydcnTXXdj7mG+/F09Fla6eGPZVj4cQEUfpzDMi+HZUlz8s3AIvlC0nzjK0nq1pvkh5tyH5MiSblIulAk3VTkPDmtXeRn6Ni00OJ109Khm3IfxTaMePPSi95rwSCL2RsPdpbdcyW3L91zJd+odP9j+l/BeilzFVpLRfYb9vzp6LbtW+PJeU9fOmDfm2Pmg5EevAf0cfh7ZBOM8cCL9MgFoYbGjgdvrfcv6DPAJf9yHsbZdWIgqAG2eOfAdqPM/3jqLYUn2F94UebjpU9Zvy69XeDra8RSGbqcJZOo//ph54u6xuGX9/wf/9Bvde2rHG58jcP9OWa7n7DC/csEJt3PtKn7wBn30yMYl+/r3ulgAETUifC++z1Aiu+/f/PuhqHeA7rcV+sbPZu8r1O9B/Vvo0guowhYndsoyn/V6UvkJ3Z2NRnlLYxvHrL/+yzR+oD2+c0I66/G+D4n9ubdbQbzD3izEbaJDMvzXL7n/651I8LlZDwbgxSrlXhhsm6oj57mdbE1Wa71xB30w/uuwQj3CO4JNJ1EvT/6l8m7+3kDG0xmWFfOUhY8Szz4a01seH5nJ/seKLj2nX5/Fg361m0QgZLv6PXNfnLtwc24glGxTkZuvrbiYOQli4M3jpJjFcZ7d9Ef2ER89kfn3gf86QP59IF+6p2ejmrfW9GrLTYSqcGobCo28r/sPFCpU5NK9pJ/sx7eT89duIVgg7BZ29NqVU9vmIOgdjcTs80XxwfepjEcUyZ5fqKkdwqg5tsBP9CjoH+vbnAE4gNaPOvprOU9SPFGn9t7RVe4bj2Y9WLdH+TvdrNEvSn0r37A6V6tMi4HMJS9wfi8D9l6ycmwZ063ORrNxvfu6dF4FF+N7gGNeymNewmNe0DjXk7jXkLjXkbj3jKNbJReTJ5Abe+Rewxqczl4v2uKdFmVqc++vDpzldc5eeGO87kDicCmveFVOuwv7TSNbnUwmej395Ip5A7eOfP97uLHjKq/mWiS/Ocr4MFJQav8LuMCuYUe7W2xJ9f24DZGnZDKoVJbnRJrCgp45fCBVuBMjm4In/bf2Dm5eKBnz/TlvZzqUsKis8jnIP2n4uSNDc+qlba6RWsPwukMlLdZKecqahklgvmfZI49Sta8lXMs2EYd56zvV7E+I/6dF5SOX7r30hvCGGU/s+HhwTaGJ+vilxY40o4iO11VpZqdXCzrydL9h53ce7lMfd4FSfvvqbQfXkwOAbnt5vkKDiQ1Rei2S1bt1pccsvzdDi6hDkWJ+VqRhCgbT6a/gsxark8uxBZp5PXBX2R1ZoDMp70spNCLSf+8P7qXBMNIf4PQsfb52OSMJlpxdJNWrmuUbLVcb630bbS9BwrI+N3R6O34DeghINJHU6evTY9GOX+vLpyoVmJ0qfR0ebre8G02+Gn6/1/g5aqSyp9zcLQ9CVIevxayon0RQtRcfSZXA5hwL+Hfw0T7HJc4MtNHq6d6OVvOGKQmtNx6wbgaXz3tj6yeHNv/uPVw2P/DmgKwDcxTG892r6fIydJqBNsf5sDtaR9aVCIGTxnP1pwmtXqjVuNvuZxqrHitN1L8WuoMVlftuF7LItrG+ko1nE4boGae/l4x/fI+OtbDJLan/X1
2DYIw0bY2eRuvUWzdviqKuKZ5oQa7HIyM2995s45tZM3V/46L+68rO3nvJP8oXfuiN25FdGgje9hdlSIvpKYMr+blvCmrmiBaL0IrOuzRhS112DIv1NVTtlHUNkADKDaTN5vwCW0vE5Ya07QRaXYnEBPmOcwfS0tZStAU3bSYsB7a2VoxNSXBypkwtHp6NbG/XGhQiNbMFULWj0ZVY3pFGwZ/vI/Gw0sny9o0Y2MBW6Y6dc6KB2tpbwBoj1P3iHu9ceyite1mz3nFiv7sj3vlGIwlbf3nq/7ArJilBT9cS5rPHabuhjqMaK/kjQnzHVS8896vU31un6YPc/pOLB/MZpN+mPRitvoWxZUzFSUtGcqch0Dv8fuRHvaj3/Skn4AVR/c3t/m0u/wlM9vJJtVNYeXquq6uI6sgn7JCUuVfrsJBP3r8c561gvzjIvOxy1uUNp+GN2/KiT6frmzHnJ1btyOh3b4RzQrCm0vyqxi1VNIaIu1aR4LNhTYdprnlGNef2avrXzGpCa0cfhf81ZkVdKolzPTF2PaSCzbcyCQEF5uSBos++OXoqR6d75YTXEcsm1uxgtr6+m8mmYPqGhJueuWMDrFTkXrPtdshfWZnF2NzEIHqPh1PjoaXg17fWV8s3k0TeGmKFhR2K7//W7/VDwK6iqZzo3cO/qPzarpr0yS0eSHF3GMPQMB5UUKaJ6eVPaW5VDGXnd9HnsOxSZossUtBvy7kzV8mJCjnVSReFj8PR8A9Nqdy7X1CiM2VhoTQ31KfkDxT/pgWqvyFtJlL+bTYLskc6LPMabV3lz8mpAijq0k90oNBnjJnuPK7JPdcSyjXGdaDUdYHk6vR7vW3aSN8cn3M0iMaSbbsZ1pOarByV45duWnrXUAVUseF2JtdWE+HY+Bcm7sl7HuwasP7yfxV4fnwYJMHxTUnh00uhjd2L/zLYPZD1vFo+ZyD836p64Tk6AzHBrS5Uq4afnmpiw2p42CTpSmOQtTys1t7XKKee11xpGJt8od1PGkSv5/SsYzaLkt7td3OqgcUqyYDOu/mOu5kG8KTL4QUvpmjIVtydPrUkg7HZTq1Or+WU9mqCLu1OLWuEyuvNVkW3FezwL61+HdF8N92/bvg8pvGDm5HiNUeqHwMskXj5o649addpbdnAqjuZ4CqvlNn3oQ6k29qB/HNZlVA8PJKU6ujayXy3N1dtRwMayXyDt+OB2/d8eyWTSXLTU2k8Pyfmm6hAVkjGq+7hRbr0WC9g3jea2f/DlNk/bk4vl3nFpO1LQG+YbaXBsLYOE98dnZuc5n7c41+2607HAUKKOLLt0UBpYq7SyyaDMZn9Uh3KPgscxNvIMDOzhwIPTurLcH2ve/15Hx6IwYEXdZfnt2pp3vNxa/29Fz0m6+VZer8fjNoVos566S9aXcFy91VPkngPfSWDxfsbpSOKaCnvmgiHecDtZ+MQW/5XELysuRHnzxnd7G095uHasobCSwgcM3xfn3/xPNzYzM7vDyDJm3I0R8tZ5jetkD4os6WrT1jUvmX2j7OCiW9B5x1PANNOlGp3XVCbesjV9VnwxAXC9n6oS1EiYvFMXUK7c1XfMA86oe6Ry6SxLewdNU8v6eaypQS8tpwhk/3i8Mw3nUzTC0sUW/oLgf9egrh5lFrMB7rDgvO5+4a1bLh0L6qTtSwuNeNmjctpvBtnnYU19SlL3WOX3MGuPMZvrafr+lq9fq6m83f6GxuZ17Pev7LP0+YMVAWm6JGAC+B3WXQVQG8VseCF5gyF0+7C+DVBfC65QBeJKDU8kAhKlwUVM0k0iGnSKk4IlGsbZsbY909SPkV0s5gIEOLaIAp4jHjSGKLkSFYBMT4Pm17hTRjW74VJrjjW2GyMa0fb7TZhTE1yW/jHpht3Edz87tkWgdWXaBxwwCxFXf9tAoV292T092T8xXfk5OgnGpsFLAKbMSYEh026rDRrWMjbYRh2lpEAsMR54BdlA4ZAsDEsdRRbGSLez1lgY1IEPJYBwb5NrKIUxwhja1AynKCfYu5lS2vzmC84p6c48RZZ35RTvPI6rd4QU4QdBfkdLP8zmd5wBRmiiDLsIJZrjGSUaBBIdJhQLGFedjiXkw1D2FsqR/TiLqLezHikhiktSDIcM5jCYuZL9uGMPYXZ/nsoj/5kq6BYNeugVjXgtu7BWIV0F8F9skK9UkUlGSJgqyRUxY5VSmn2pizUwW+KVhe9MO6udFdAvGFXAKxBaWJcVwNoYRfpTQJ0l0u2sGpO7hv0MqAaKVQpA0DzANwJ+SUgVKDLZPAhaEIGsMpiudwSlDDJRfIBnEASpORKJQS/gkpxlqZ2CrZEk4FVTdCrBO9DS+EoLd6IcTqiOoNIvtvTH7tcgi3VbwiLn/iblb/kojNyWtcFHHXrV+Myn89Mv/16PwZmmtz18TG2xzutvXrIvw3qMnyDQ8tuWl7tSl6+Aaju+bqhnZkxMf0v/Qv1zc+LvUMvFlPxuWQy2TUNTKqgsyWuvj2r3bYAsYSpPrWLVGxaU+g17pN+w5j3YHJymoasjhGBhsCGEsQpLniiPnW+lJGJA55c4xF5leX0jDikVDIj1mEOIsF0sKPUSgwJ8oVpVRLjCVusml/Ovrn3w9fHhZXJHsPH3pJJ68AWqStbeiW99tv2VvgptvXK4rpTFDdbvQ3sBvtlu/q3WhJKxZ9wqTfLfpfyqL/HATjQTKVvZdJ7JbpShiQMuqfHQVwbUMbB4gEWiFOeYiUEhxhoQwXkbvFnNZAAbCic8yICuAfIcQcBTCjKAdcgLCigAIiI1AYBM7SAlmEtZaYlheYc7xtFOAGbAUIYJ93f+iWnMFa7x0t0PiaHP9a7sJt2Xmwg0UdLPqTwaI2l44nmEZWI6HVl44LQinukNAXg4SyWxC+GjQkiObUDywyhjDEQ2VQ6DOFpE94aLXk1JAaaAjYm8pAYiGYosqh/gwNSQMYSQcCWeNLxIU2QFr6KLaUSi0jySxpiYZ6/iow9Ojl4cHJoff44OTg54PjQ+/oiff8xYl3+H+Pjk+OC2y0AvTIdqDnxqtXJ2QbCFmQpH5AK4SsVD5fKWQhV0A6t8gvRsh+wTbmWHKCTcBQqH3Q/uAB5KmKEWN+bMKAwHedACujpxfhWE9M3qdp/sDQiGsBOqP0NeQPo5SS5BFV1Dc4ME4sLY9ARUOWGfrT/wdQSwcINrvkudAyAAAu1AEAUEsBAhQAFAAICAgAwAuDUDa75LnQMgAALtQBAAoABAAAAAAAAAAAAAAAAAAAAENEQy5weXRob27+ygAAUEsFBgAAAAABAAEAPAAAAAwzAAAAAA==" @@ -85,7 +86,6 @@ func convertBase64ToCheckSum(b64 string) (string, error) { return strconv.Itoa(int(crc32.ChecksumIEEE(dataArr))), nil } return checksum, nil - } func convertZipBytesToCRC(b64 []byte) (string, error) { @@ -110,7 +110,7 
@@ func convertZipBytesToCRC(b64 []byte) (string, error) { return strconv.Itoa(int(totalSum)), nil } -func getDBCCheckSumForCommands(fileIO io.ReadCloser) (int, error) { +func getDBCCheckSumForCommands(fileIO io.Reader) (int, error) { var stringBuff bytes.Buffer scanner := bufio.NewScanner(fileIO) buf := make([]byte, 0, 64*1024) diff --git a/client/service/notebooks_test.go b/client/service/notebooks_test.go index 0a01bf152..d1a6a66d8 100644 --- a/client/service/notebooks_test.go +++ b/client/service/notebooks_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestNotebooksAPI_Create(t *testing.T) { diff --git a/client/service/secret_acls.go b/client/service/secret_acls.go index 586d740b3..49396ce6f 100644 --- a/client/service/secret_acls.go +++ b/client/service/secret_acls.go @@ -2,8 +2,9 @@ package service import ( "encoding/json" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // SecretAclsAPI exposes the Secret ACL API diff --git a/client/service/secret_acls_test.go b/client/service/secret_acls_test.go index 3a1c7c74e..343df1036 100644 --- a/client/service/secret_acls_test.go +++ b/client/service/secret_acls_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) //go:generate easytags $GOFILE diff --git a/client/service/secret_scopes.go b/client/service/secret_scopes.go index 26c66951a..58031ecc9 100644 --- a/client/service/secret_scopes.go +++ b/client/service/secret_scopes.go @@ -3,8 +3,9 @@ package service import ( "encoding/json" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // SecretScopesAPI exposes the Secret Scopes API diff --git a/client/service/secret_scopes_test.go b/client/service/secret_scopes_test.go index be0714fe2..4a2e012e9 100644 --- a/client/service/secret_scopes_test.go +++ b/client/service/secret_scopes_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) //go:generate easytags $GOFILE @@ -111,7 +112,6 @@ func TestSecretScopesAPI_Read(t *testing.T) { } func TestSecretScopesAPI_List(t *testing.T) { - tests := []struct { name string response string diff --git a/client/service/secrets.go b/client/service/secrets.go index b5cb2bcae..4fa918d50 100644 --- a/client/service/secrets.go +++ b/client/service/secrets.go @@ -3,8 +3,9 @@ package service import ( "encoding/json" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // SecretsAPI exposes the Secrets API diff --git a/client/service/secrets_scopes_acls_integration_test.go b/client/service/secrets_scopes_acls_integration_test.go index 181af6eb4..964ad2f6a 100644 --- a/client/service/secrets_scopes_acls_integration_test.go +++ b/client/service/secrets_scopes_acls_integration_test.go @@ -1,9 +1,10 @@ package service import ( + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/stretchr/testify/assert" - "testing" ) func 
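getDBCCheckSumForCommands now accepts io.Reader instead of io.ReadCloser: it only reads, so it should ask for the smallest interface, and tests can pass a bytes.Buffer directly without wrapping it in a closer. A minimal sketch of the same idea on a hypothetical line counter:

package service

import (
	"bufio"
	"io"
)

// countLines needs nothing beyond Read, so it takes io.Reader; a caller that
// holds an io.ReadCloser can still pass it and remains responsible for Close.
func countLines(r io.Reader) (int, error) {
	scanner := bufio.NewScanner(r)
	n := 0
	for scanner.Scan() {
		n++
	}
	return n, scanner.Err()
}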
TestSecretsScopesAclsIntegration(t *testing.T) { @@ -64,5 +65,4 @@ func TestSecretsScopesAclsIntegration(t *testing.T) { err = client.SecretAcls().Delete(testScope, testPrincipal) assert.NoError(t, err, err) - } diff --git a/client/service/secrets_test.go b/client/service/secrets_test.go index b9168beb6..99454cdd0 100644 --- a/client/service/secrets_test.go +++ b/client/service/secrets_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) //go:generate easytags $GOFILE @@ -42,7 +43,6 @@ func TestSecretsAPI_Create(t *testing.T) { } func TestSecretsAPI_Delete(t *testing.T) { - type args struct { Scope string `json:"scope"` Key string `json:"key"` diff --git a/client/service/tokens.go b/client/service/tokens.go index bc1212328..beea0c37b 100644 --- a/client/service/tokens.go +++ b/client/service/tokens.go @@ -3,8 +3,9 @@ package service import ( "encoding/json" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // TokensAPI exposes the Secrets API diff --git a/client/service/tokens_integration_test.go b/client/service/tokens_integration_test.go index 61abdf233..63797e077 100644 --- a/client/service/tokens_integration_test.go +++ b/client/service/tokens_integration_test.go @@ -1,8 +1,9 @@ package service import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestCreateToken(t *testing.T) { @@ -30,5 +31,4 @@ func TestCreateToken(t *testing.T) { tokenList, err := client.Tokens().List() assert.NoError(t, err, err) assert.True(t, len(tokenList) > 0, "Token list is empty") - } diff --git a/client/service/tokens_test.go b/client/service/tokens_test.go index 5b99aebcc..62ef19faf 100644 --- a/client/service/tokens_test.go +++ b/client/service/tokens_test.go @@ -1,9 +1,10 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestTokensAPI_Create(t *testing.T) { diff --git a/client/service/users.go b/client/service/users.go index 4ad575a47..a97a20d57 100644 --- a/client/service/users.go +++ b/client/service/users.go @@ -4,12 +4,13 @@ import ( "encoding/json" "errors" "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "log" "net/http" "sort" "strings" "time" + + "github.com/databrickslabs/databricks-terraform/client/model" ) // UsersAPI exposes the scim user API @@ -64,7 +65,7 @@ func (a UsersAPI) Read(userID string) (model.User, error) { } groups = append(groups, group) } - inherited, unInherited, err := a.getInheritedAndNonInheritedRoles(user, groups) + inherited, unInherited := a.getInheritedAndNonInheritedRoles(user, groups) user.InheritedRoles = inherited user.UnInheritedRoles = unInherited return user, err @@ -117,7 +118,6 @@ func (a UsersAPI) Update(userID string, userName string, displayName string, ent // Delete will delete the user given the user id func (a UsersAPI) Delete(userID string) error { - userPath := fmt.Sprintf("/preview/scim/v2/Users/%v", userID) _, err := a.Client.performQuery(http.MethodDelete, userPath, "2.0", scimHeaders, nil, nil) @@ -138,7 +138,7 @@ func (a UsersAPI) SetUserAsAdmin(userID string, adminGroupID string) error { addOperations = model.UserPatchOperations{ Op: "add", Value: &model.GroupsValue{ - Groups: 
[]model.ValueListItem{model.ValueListItem{Value: adminGroupID}}, + Groups: []model.ValueListItem{{Value: adminGroupID}}, }, } userPatchRequest.Operations = append(userPatchRequest.Operations, addOperations) @@ -227,10 +227,9 @@ func (a UsersAPI) GetOrCreateDefaultMetaUser(metaUserDisplayName string, metaUse return newCreatedUser, err //newCreatedUserFullInfo, err := a.Read(newCreatedUser.ID) //return newCreatedUserFullInfo, err - } -func (a UsersAPI) getInheritedAndNonInheritedRoles(user model.User, groups []model.Group) (inherited []model.RoleListItem, unInherited []model.RoleListItem, err error) { +func (a UsersAPI) getInheritedAndNonInheritedRoles(user model.User, groups []model.Group) (inherited []model.RoleListItem, unInherited []model.RoleListItem) { allRoles := user.Roles var inheritedRoles []model.RoleListItem inheritedRolesKeys := []string{} @@ -251,5 +250,5 @@ func (a UsersAPI) getInheritedAndNonInheritedRoles(user model.User, groups []mod unInherited = append(unInherited, role) } } - return inherited, unInherited, nil + return inherited, unInherited } diff --git a/client/service/users_integration_test.go b/client/service/users_integration_test.go index eb134cf80..92dda3891 100644 --- a/client/service/users_integration_test.go +++ b/client/service/users_integration_test.go @@ -1,10 +1,11 @@ package service import ( - "github.com/databrickslabs/databricks-terraform/client/model" - "github.com/stretchr/testify/assert" "log" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/stretchr/testify/assert" ) func TestCreateUser(t *testing.T) { @@ -30,7 +31,6 @@ func TestCreateUser(t *testing.T) { err = client.Users().Update(user.ID, "newtestuser@databricks.com", "Test User", []string{string(model.AllowClusterCreateEntitlement)}, nil) //t.Log(user) assert.NoError(t, err, err) - } func TestCreateAdminUser(t *testing.T) { @@ -74,7 +74,6 @@ func TestCreateAdminUser(t *testing.T) { assert.NoError(t, err, err) assert.True(t, userIsAdmin == false) log.Println(userIsAdmin) - } // user id 101354 diff --git a/client/service/users_test.go b/client/service/users_test.go index c185eb61a..0ef9f1553 100644 --- a/client/service/users_test.go +++ b/client/service/users_test.go @@ -2,9 +2,10 @@ package service import ( "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" "net/http" "testing" + + "github.com/databrickslabs/databricks-terraform/client/model" ) func TestScimUserAPI_Create(t *testing.T) { @@ -228,7 +229,7 @@ func TestScimUserAPI_SetUserAsAdmin(t *testing.T) { Operations: []model.UserPatchOperations{ { Op: "add", - Value: &model.GroupsValue{Groups: []model.ValueListItem{model.ValueListItem{Value: tt.args.AdminGroupID}}}, + Value: &model.GroupsValue{Groups: []model.ValueListItem{{Value: tt.args.AdminGroupID}}}, }, }, } @@ -340,7 +341,7 @@ func TestScimUserAPI_VerifyUserAsAdmin(t *testing.T) { Operations: []model.UserPatchOperations{ { Op: "add", - Value: &model.GroupsValue{Groups: []model.ValueListItem{model.ValueListItem{Value: tt.args.AdminGroupID}}}, + Value: &model.GroupsValue{Groups: []model.ValueListItem{{Value: tt.args.AdminGroupID}}}, }, }, } diff --git a/databricks/azure_auth.go b/databricks/azure_auth.go index 361ae80d1..1ce0170b0 100644 --- a/databricks/azure_auth.go +++ b/databricks/azure_auth.go @@ -3,12 +3,13 @@ package databricks import ( "encoding/json" "fmt" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/databrickslabs/databricks-terraform/client/service" "log" 
"net/http" urlParse "net/url" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/databrickslabs/databricks-terraform/client/service" ) // List of management information @@ -49,7 +50,7 @@ type WorkspaceRequest struct { Location string `json:"location"` } -func (a *AzureAuth) getManagementToken(config *service.DBApiClientConfig) error { +func (a *AzureAuth) getManagementToken() error { log.Println("[DEBUG] Creating Azure Databricks management OAuth token.") mgmtTokenOAuthCfg, err := adal.NewOAuthConfigWithAPIVersion(azure.PublicCloud.ActiveDirectoryEndpoint, a.TokenPayload.TenantID, @@ -105,7 +106,7 @@ func (a *AzureAuth) getWorkspaceID(config *service.DBApiClientConfig) error { return err } -func (a *AzureAuth) getADBPlatformToken(clientConfig *service.DBApiClientConfig) error { +func (a *AzureAuth) getADBPlatformToken() error { log.Println("[DEBUG] Creating Azure Databricks management OAuth token.") platformTokenOAuthCfg, err := adal.NewOAuthConfigWithAPIVersion(azure.PublicCloud.ActiveDirectoryEndpoint, a.TokenPayload.TenantID, @@ -172,7 +173,7 @@ func (a *AzureAuth) initWorkspaceAndGetClient(config *service.DBApiClientConfig) //var dbClient service.DBApiClient // Get management token - err := a.getManagementToken(config) + err := a.getManagementToken() if err != nil { return err } @@ -184,7 +185,7 @@ func (a *AzureAuth) initWorkspaceAndGetClient(config *service.DBApiClientConfig) } // Get platform token - err = a.getADBPlatformToken(config) + err = a.getADBPlatformToken() if err != nil { return err } diff --git a/databricks/data_source_databricks_notebook_paths.go b/databricks/data_source_databricks_notebook_paths.go index 1a3b9548b..1db855da2 100644 --- a/databricks/data_source_databricks_notebook_paths.go +++ b/databricks/data_source_databricks_notebook_paths.go @@ -2,6 +2,7 @@ package databricks import ( "bytes" + "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" diff --git a/databricks/mounts.go b/databricks/mounts.go index 8293431a2..e53932c07 100644 --- a/databricks/mounts.go +++ b/databricks/mounts.go @@ -3,10 +3,11 @@ package databricks import ( "errors" "fmt" - "github.com/databrickslabs/databricks-terraform/client/service" "log" "net/url" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" ) // Mount interface describes the functionality of any mount which is create, read and delete diff --git a/databricks/provider.go b/databricks/provider.go index fc6432673..424419810 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -163,7 +163,7 @@ func Provider(version string) terraform.ResourceProvider { return provider } -func providerConfigureAzureClient(d *schema.ResourceData, providerVersion string, config *service.DBApiClientConfig) (interface{}, error) { +func providerConfigureAzureClient(d *schema.ResourceData, config *service.DBApiClientConfig) (interface{}, error) { log.Println("Creating db client via azure auth!") azureAuth, _ := d.GetOk("azure_auth") azureAuthMap := azureAuth.(map[string]interface{}) @@ -199,32 +199,44 @@ func providerConfigureAzureClient(d *schema.ResourceData, providerVersion string // - provider config // - DATABRICKS_AZURE_* environment variables // - ARM_* environment variables - if subscriptionID, ok := azureAuthMap["subscription_id"].(string); ok { + + subscriptionID, ok := azureAuthMap["subscription_id"].(string) + switch 
{ + case ok: tokenPayload.SubscriptionID = subscriptionID - } else if os.Getenv("DATABRICKS_AZURE_SUBSCRIPTION_ID") != "" { + case os.Getenv("DATABRICKS_AZURE_SUBSCRIPTION_ID") != "": + tokenPayload.SubscriptionID = os.Getenv("DATABRICKS_AZURE_SUBSCRIPTION_ID") + case os.Getenv("ARM_SUBSCRIPTION_ID") != "": tokenPayload.SubscriptionID = os.Getenv("DATABRICKS_AZURE_SUBSCRIPTION_ID") - } else if os.Getenv("ARM_SUBSCRIPTION_ID") != "" { - tokenPayload.SubscriptionID = os.Getenv("ARM_SUBSCRIPTION_ID") } - if clientSecret, ok := azureAuthMap["client_secret"].(string); ok { + + clientSecret, ok := azureAuthMap["client_secret"].(string) + switch { + case ok: tokenPayload.ClientSecret = clientSecret - } else if os.Getenv("DATABRICKS_AZURE_CLIENT_SECRET") != "" { + case os.Getenv("DATABRICKS_AZURE_CLIENT_SECRET") != "": tokenPayload.ClientSecret = os.Getenv("DATABRICKS_AZURE_CLIENT_SECRET") - } else if os.Getenv("ARM_CLIENT_SECRET") != "" { + case os.Getenv("ARM_CLIENT_SECRET") != "": tokenPayload.ClientSecret = os.Getenv("ARM_CLIENT_SECRET") } - if clientID, ok := azureAuthMap["client_id"].(string); ok { + + clientID, ok := azureAuthMap["client_id"].(string) + switch { + case ok: tokenPayload.ClientID = clientID - } else if os.Getenv("DATABRICKS_AZURE_CLIENT_ID") != "" { + case os.Getenv("DATABRICKS_AZURE_CLIENT_ID") != "": tokenPayload.ClientID = os.Getenv("DATABRICKS_AZURE_CLIENT_ID") - } else if os.Getenv("ARM_CLIENT_ID") != "" { + case os.Getenv("ARM_CLIENT_ID") != "": tokenPayload.ClientID = os.Getenv("ARM_CLIENT_ID") } - if tenantID, ok := azureAuthMap["tenant_id"].(string); ok { + + tenantID, ok := azureAuthMap["tenant_id"].(string) + switch { + case ok: tokenPayload.TenantID = tenantID - } else if os.Getenv("DATABRICKS_AZURE_TENANT_ID") != "" { + case os.Getenv("DATABRICKS_AZURE_TENANT_ID") != "": tokenPayload.TenantID = os.Getenv("DATABRICKS_AZURE_TENANT_ID") - } else if os.Getenv("ARM_TENANT_ID") != "" { + case os.Getenv("ARM_TENANT_ID") != "": tokenPayload.TenantID = os.Getenv("ARM_TENANT_ID") } @@ -237,9 +249,7 @@ func providerConfigureAzureClient(d *schema.ResourceData, providerVersion string } // Setup the CustomAuthorizer Function to be called at API invoke rather than client invoke - config.CustomAuthorizer = func(config *service.DBApiClientConfig) error { - return azureAuthSetup.initWorkspaceAndGetClient(config) - } + config.CustomAuthorizer = azureAuthSetup.initWorkspaceAndGetClient var dbClient service.DBApiClient dbClient.SetConfig(config) return &dbClient, nil @@ -313,11 +323,10 @@ func providerConfigure(d *schema.ResourceData, providerVersion string) (interfac return nil, fmt.Errorf("failed to get credentials from config file; error msg: %w", err) } } - } else { // Abstracted logic to another function that returns a interface{}, error to inject directly // for the providers during cloud integration testing - return providerConfigureAzureClient(d, providerVersion, &config) + return providerConfigureAzureClient(d, &config) } var dbClient service.DBApiClient diff --git a/databricks/provider_test.go b/databricks/provider_test.go index 49273bf60..4840607d5 100644 --- a/databricks/provider_test.go +++ b/databricks/provider_test.go @@ -31,14 +31,13 @@ func init() { if cloudEnv == "azure" { var config service.DBApiClientConfig testAccProvider.ConfigureFunc = func(data *schema.ResourceData) (i interface{}, e error) { - return providerConfigureAzureClient(data, "", &config) + return providerConfigureAzureClient(data, &config) } } testAccProviders = 
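provider.go rewrites each if/else-if chain as a tagless switch so the explicit provider value, the DATABRICKS_AZURE_* variable, and the ARM_* variable are tried in that order. A minimal sketch of the intended fallback for one field, assuming each case reads its own variable (the ARM case should read ARM_SUBSCRIPTION_ID) and keying on a non-empty string rather than the map type assertion used above:

package databricks

import "os"

// subscriptionIDFrom sketches the fallback order for the subscription id:
// value from provider config first, then DATABRICKS_AZURE_SUBSCRIPTION_ID,
// then ARM_SUBSCRIPTION_ID. An empty result means nothing was configured.
func subscriptionIDFrom(configValue string) string {
	switch {
	case configValue != "":
		return configValue
	case os.Getenv("DATABRICKS_AZURE_SUBSCRIPTION_ID") != "":
		return os.Getenv("DATABRICKS_AZURE_SUBSCRIPTION_ID")
	case os.Getenv("ARM_SUBSCRIPTION_ID") != "":
		return os.Getenv("ARM_SUBSCRIPTION_ID")
	default:
		return ""
	}
}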
map[string]terraform.ResourceProvider{ "databricks": testAccProvider, } - } func getMWSClient() *service.DBApiClient { @@ -111,7 +110,7 @@ func TestProvider(t *testing.T) { func TestProvider_NoOptionsResultsInError(t *testing.T) { var provider = Provider("") var raw = make(map[string]interface{}) - raw["config_file"] = "testdata/.databrickscfg_non_existant" + raw["config_file"] = "testdata/.databrickscfg_non_existent" err := provider.Configure(terraform.NewResourceConfigRaw(raw)) assert.NotNil(t, err) } diff --git a/databricks/resource_databricks_azure_blob_mount_test.go b/databricks/resource_databricks_azure_blob_mount_test.go index 026b23a60..f7f8a0b15 100644 --- a/databricks/resource_databricks_azure_blob_mount_test.go +++ b/databricks/resource_databricks_azure_blob_mount_test.go @@ -122,7 +122,6 @@ func testAccAzureBlobMountMountExists(n string, azureBlobMount *AzureBlobMount, *azureBlobMount = *blobMount return nil - } } diff --git a/databricks/resource_databricks_cluster.go b/databricks/resource_databricks_cluster.go index 15b092079..c7cdd13de 100644 --- a/databricks/resource_databricks_cluster.go +++ b/databricks/resource_databricks_cluster.go @@ -502,7 +502,6 @@ func convertListInterfaceToString(m []interface{}) []string { } func resourceClusterCreate(d *schema.ResourceData, m interface{}) error { - client := m.(*service.DBApiClient) cluster := parseSchemaToCluster(d, "") @@ -683,7 +682,7 @@ func resourceClusterRead(d *schema.ResourceData, m interface{}) error { return err } - if len(clusterInfo.SparkConf) >= 0 { + if clusterInfo.SparkConf != nil { err = d.Set("spark_conf", clusterInfo.SparkConf) if err != nil { return err @@ -873,7 +872,7 @@ func resourceClusterRead(d *schema.ResourceData, m interface{}) error { return err } - err = d.Set("state_message", string(clusterInfo.StateMessage)) + err = d.Set("state_message", clusterInfo.StateMessage) return err } @@ -883,22 +882,23 @@ func calculateLibraryChanges(new []model.Library, old []model.Library) ([]model. newKeys := []string{} for _, library := range new { - if len(library.Whl) > 0 { + switch { + case len(library.Whl) > 0: newDictionary[library.Whl] = library newKeys = append(newKeys, library.Whl) - } else if len(library.Egg) > 0 { + case len(library.Egg) > 0: newDictionary[library.Egg] = library newKeys = append(newKeys, library.Egg) - } else if len(library.Jar) > 0 { + case len(library.Jar) > 0: newDictionary[library.Jar] = library newKeys = append(newKeys, library.Jar) - } else if len(library.Pypi.Package) > 0 { + case library.Pypi != nil && len(library.Pypi.Package) > 0: newDictionary[library.Pypi.Package+library.Pypi.Repo] = library newKeys = append(newKeys, library.Pypi.Package+library.Pypi.Repo) - } else if len(library.Maven.Coordinates) > 0 { + case library.Maven != nil && len(library.Maven.Coordinates) > 0: newDictionary[library.Maven.Coordinates+library.Maven.Repo+strings.Join(library.Maven.Exclusions, "")] = library newKeys = append(newKeys, library.Maven.Coordinates+library.Maven.Repo+strings.Join(library.Maven.Exclusions, "")) - } else if len(library.Cran.Package) > 0 { + case library.Cran != nil && len(library.Cran.Package) > 0: newDictionary[library.Cran.Package+library.Cran.Repo] = library newKeys = append(newKeys, library.Cran.Package+library.Cran.Repo) } @@ -907,22 +907,23 @@ func calculateLibraryChanges(new []model.Library, old []model.Library) ([]model. 
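resource_databricks_cluster.go replaces `len(clusterInfo.SparkConf) >= 0`, which holds for every map including a nil one, with an explicit nil check, so spark_conf is only written back when the API actually returned a value. The trap in isolation, with a hypothetical setter standing in for d.Set:

package databricks

// setSparkConf shows why the guard changed: len of a nil map is 0, so
// "len(conf) >= 0" never filters anything, while "conf != nil" does.
func setSparkConf(set func(key string, value interface{}) error, conf map[string]string) error {
	if conf != nil { // was: len(conf) >= 0, which is always true
		return set("spark_conf", conf)
	}
	return nil
}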
oldDictionary := map[string]model.Library{} oldKeys := []string{} for _, library := range old { - if len(library.Whl) > 0 { + switch { + case len(library.Whl) > 0: oldDictionary[library.Whl] = library oldKeys = append(oldKeys, library.Whl) - } else if len(library.Egg) > 0 { + case len(library.Egg) > 0: oldDictionary[library.Egg] = library oldKeys = append(oldKeys, library.Egg) - } else if len(library.Jar) > 0 { + case len(library.Jar) > 0: oldDictionary[library.Jar] = library oldKeys = append(oldKeys, library.Jar) - } else if len(library.Pypi.Package) > 0 { + case library.Pypi != nil && len(library.Pypi.Package) > 0: oldDictionary[library.Pypi.Package+library.Pypi.Repo] = library oldKeys = append(oldKeys, library.Pypi.Package+library.Pypi.Repo) - } else if len(library.Maven.Coordinates) > 0 { + case library.Maven != nil && len(library.Maven.Coordinates) > 0: oldDictionary[library.Maven.Coordinates+library.Maven.Repo+strings.Join(library.Maven.Exclusions, "")] = library oldKeys = append(oldKeys, library.Maven.Coordinates+library.Maven.Repo+strings.Join(library.Maven.Exclusions, "")) - } else if len(library.Cran.Package) > 0 { + case library.Cran != nil && len(library.Cran.Package) > 0: oldDictionary[library.Cran.Package+library.Cran.Repo] = library oldKeys = append(oldKeys, library.Cran.Package+library.Cran.Repo) } @@ -1058,7 +1059,8 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { clusterState := clusterInfo.State - if model.ContainsClusterState([]model.ClusterState{model.ClusterState(model.ClusterStateTerminated)}, clusterState) { + switch { + case model.ContainsClusterState([]model.ClusterState{model.ClusterState(model.ClusterStateTerminated)}, clusterState): cluster := parseSchemaToCluster(d, "") cluster.ClusterID = id err := client.Clusters().Edit(cluster) @@ -1095,7 +1097,7 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { } } return resourceClusterRead(d, m) - } else if model.ContainsClusterState([]model.ClusterState{model.ClusterState(model.ClusterStateRunning)}, clusterState) { + case model.ContainsClusterState([]model.ClusterState{model.ClusterState(model.ClusterStateRunning)}, clusterState): cluster := parseSchemaToCluster(d, "") cluster.ClusterID = id @@ -1124,8 +1126,7 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error { return err } return resourceClusterRead(d, m) - } else if model.ContainsClusterState([]model.ClusterState{model.ClusterStatePending, - model.ClusterStateResizing}, clusterState) { + case model.ContainsClusterState([]model.ClusterState{model.ClusterStatePending, model.ClusterStateResizing}, clusterState): err = client.Clusters().WaitForClusterRunning(clusterInfo.ClusterID, 30, 120) if err != nil { return err @@ -1374,7 +1375,6 @@ func parseSchemaToCluster(d *schema.ResourceData, schemaAttPrefix string) model. initScriptsLocations = append(initScriptsLocations, initScriptsConf) } cluster.InitScripts = initScriptsLocations - } //Deal with docker image for DCS @@ -1403,7 +1403,6 @@ func parseSchemaToCluster(d *schema.ResourceData, schemaAttPrefix string) model. 
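calculateLibraryChanges now checks the optional Pypi, Maven, and Cran pointers for nil before dereferencing them, so a library given only as a wheel or jar can no longer panic on library.Pypi.Package. The guard on its own, in a hypothetical key-building helper:

package databricks

import "github.com/databrickslabs/databricks-terraform/client/model"

// libraryKey builds a de-duplication key the way the switches above do, but
// only touches optional sub-structs after a nil check.
func libraryKey(l model.Library) string {
	switch {
	case len(l.Whl) > 0:
		return l.Whl
	case len(l.Jar) > 0:
		return l.Jar
	case l.Maven != nil && len(l.Maven.Coordinates) > 0:
		return l.Maven.Coordinates + l.Maven.Repo
	default:
		return ""
	}
}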
} cluster.DockerImage = &dockerImageData } - } //Deal with spark environment variables diff --git a/databricks/resource_databricks_dbfs_file.go b/databricks/resource_databricks_dbfs_file.go index 6cdaefadc..01417c2a9 100644 --- a/databricks/resource_databricks_dbfs_file.go +++ b/databricks/resource_databricks_dbfs_file.go @@ -2,11 +2,12 @@ package databricks import ( "fmt" - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "path/filepath" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceDBFSFile() *schema.Resource { diff --git a/databricks/resource_databricks_dbfs_file_sync.go b/databricks/resource_databricks_dbfs_file_sync.go index 5143c0c73..c3b3a66e4 100644 --- a/databricks/resource_databricks_dbfs_file_sync.go +++ b/databricks/resource_databricks_dbfs_file_sync.go @@ -1,10 +1,11 @@ package databricks import ( - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "path/filepath" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceDBFSFileSync() *schema.Resource { diff --git a/databricks/resource_databricks_instance_pool.go b/databricks/resource_databricks_instance_pool.go index 3374d379b..16c035b0e 100644 --- a/databricks/resource_databricks_instance_pool.go +++ b/databricks/resource_databricks_instance_pool.go @@ -2,13 +2,14 @@ package databricks import ( "fmt" + "log" + "strings" + "time" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "log" - "strings" - "time" ) func resourceInstancePool() *schema.Resource { @@ -295,14 +296,12 @@ func resourceInstancePoolRead(d *schema.ResourceData, m interface{}) error { diskSpecList := []interface{}{} diskSpecListItem := map[string]interface{}{} if instancePoolInfo.DiskSpec.DiskType != nil { - if instancePoolInfo.DiskSpec.DiskCount >= 0 { diskSpecListItem["disk_count"] = instancePoolInfo.DiskSpec.DiskCount } if instancePoolInfo.DiskSpec.DiskSize >= 0 { diskSpecListItem["disk_size"] = instancePoolInfo.DiskSpec.DiskSize } - } if instancePoolInfo.DiskSpec.DiskType.EbsVolumeType != "" { diskSpecListItem["ebs_volume_type"] = instancePoolInfo.DiskSpec.DiskType.EbsVolumeType diff --git a/databricks/resource_databricks_instance_profile.go b/databricks/resource_databricks_instance_profile.go index 0cc0578d9..54917d300 100644 --- a/databricks/resource_databricks_instance_profile.go +++ b/databricks/resource_databricks_instance_profile.go @@ -2,10 +2,11 @@ package databricks import ( "fmt" - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceInstanceProfile() *schema.Resource { diff --git a/databricks/resource_databricks_job.go b/databricks/resource_databricks_job.go index e197a0ab4..eb250b35a 100644 --- a/databricks/resource_databricks_job.go +++ b/databricks/resource_databricks_job.go @@ -512,7 +512,6 @@ func resourceJob() *schema.Resource { } func resourceJobCreate(d 
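Editor's note: most of the import-only hunks in this patch apply one convention, standard-library packages first, then a blank line, then third-party modules, which is the grouping `gofmt`/`goimports` keeps stable. A minimal compilable sketch of that layout; the third-party path is one the provider already imports.

```go
package main

import (
	// Standard-library imports come first...
	"fmt"
	"log"
	"strings"

	// ...then, after a blank line, third-party modules.
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
	s := schema.Schema{Type: schema.TypeString}
	log.Println(strings.ToUpper(fmt.Sprintf("grouped imports, schema type %v", s.Type)))
}
```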
*schema.ResourceData, m interface{}) error { - client := m.(*service.DBApiClient) jobSettings := parseSchemaToJobSettings(d) @@ -877,7 +876,6 @@ func resourceJobRead(d *schema.ResourceData, m interface{}) error { return err } } - } else { err = d.Set("email_notifications", nil) if err != nil { @@ -974,7 +972,6 @@ func resourceJobDelete(d *schema.ResourceData, m interface{}) error { } func parseSchemaToJobSettings(d *schema.ResourceData) model.JobSettings { - var jobSettings model.JobSettings if existingClusterID, ok := d.GetOk("existing_cluster_id"); ok { @@ -1048,7 +1045,6 @@ func parseSchemaToJobSettings(d *schema.ResourceData) model.JobSettings { QuartzCronExpression: scheduleMap["quartz_cron_expression"].(string), TimezoneID: scheduleMap["timezone_id"].(string), } - } if maxConcurrentRuns, ok := d.GetOk("max_concurrent_runs"); ok { intVal, _ := maxConcurrentRuns.(int) diff --git a/databricks/resource_databricks_job_aws_test.go b/databricks/resource_databricks_job_aws_test.go index e423ed498..dd98d5b82 100644 --- a/databricks/resource_databricks_job_aws_test.go +++ b/databricks/resource_databricks_job_aws_test.go @@ -3,13 +3,14 @@ package databricks import ( "errors" "fmt" + "strconv" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "strconv" - "testing" ) func TestAccAwsJobResource(t *testing.T) { @@ -30,7 +31,7 @@ func TestAccAwsJobResource(t *testing.T) { // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testAwsJobResourceExists("databricks_job.my_job", &job, t), + testAwsJobResourceExists("databricks_job.my_job", &job), // verify remote values testAwsJobValuesNewCluster(t, &job), ), @@ -60,7 +61,7 @@ func testAwsJobResourceDestroy(s *terraform.State) error { } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. 
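Editor's note: `parseSchemaToJobSettings` reads optional attributes through `d.GetOk` followed by a type assertion, as in the `max_concurrent_runs` lines above. A hedged sketch of that read-and-assert pattern using a plain map in place of `*schema.ResourceData`; the helper and field names below are illustrative only.

```go
package main

import "fmt"

// getOk loosely mimics schema.ResourceData.GetOk for a plain map: it reports
// whether a key is present (a simplification of GetOk's zero-value handling).
func getOk(d map[string]interface{}, key string) (interface{}, bool) {
	v, ok := d[key]
	if !ok || v == nil {
		return nil, false
	}
	return v, true
}

type jobSettings struct {
	ExistingClusterID string
	MaxConcurrentRuns int
}

func main() {
	d := map[string]interface{}{
		"existing_cluster_id": "0615-abcdef",
		"max_concurrent_runs": 2,
	}

	var js jobSettings
	// Same shape as the provider code: look the key up, then assert its type.
	if v, ok := getOk(d, "existing_cluster_id"); ok {
		js.ExistingClusterID = v.(string)
	}
	if v, ok := getOk(d, "max_concurrent_runs"); ok {
		intVal, _ := v.(int)
		js.MaxConcurrentRuns = intVal
	}
	fmt.Printf("%+v\n", js)
}
```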
-func testAwsJobResourceExists(n string, job *model.Job, t *testing.T) resource.TestCheckFunc { +func testAwsJobResourceExists(n string, job *model.Job) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] diff --git a/databricks/resource_databricks_job_azure_test.go b/databricks/resource_databricks_job_azure_test.go index 58712e442..898be71f0 100644 --- a/databricks/resource_databricks_job_azure_test.go +++ b/databricks/resource_databricks_job_azure_test.go @@ -3,13 +3,14 @@ package databricks import ( "errors" "fmt" + "strconv" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "strconv" - "testing" ) func TestAccAzureJobResource(t *testing.T) { @@ -30,7 +31,7 @@ func TestAccAzureJobResource(t *testing.T) { // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testAzureJobResourceExists("databricks_job.my_job", &job, t), + testAzureJobResourceExists("databricks_job.my_job", &job), // verify remote values testAzureJobValuesNewCluster(t, &job), ), @@ -60,7 +61,7 @@ func testAzureJobResourceDestroy(s *terraform.State) error { } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. -func testAzureJobResourceExists(n string, job *model.Job, t *testing.T) resource.TestCheckFunc { +func testAzureJobResourceExists(n string, job *model.Job) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] diff --git a/databricks/resource_databricks_mws_credentials_mws_test.go b/databricks/resource_databricks_mws_credentials_mws_test.go index 17101e324..144be7a45 100644 --- a/databricks/resource_databricks_mws_credentials_mws_test.go +++ b/databricks/resource_databricks_mws_credentials_mws_test.go @@ -3,11 +3,12 @@ package databricks import ( "errors" "fmt" + "os" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" - "os" - "testing" ) func TestAccMWSCredentials(t *testing.T) { diff --git a/databricks/resource_databricks_mws_networks.go b/databricks/resource_databricks_mws_networks.go index 70acdb83e..b3c0963b7 100644 --- a/databricks/resource_databricks_mws_networks.go +++ b/databricks/resource_databricks_mws_networks.go @@ -1,13 +1,14 @@ package databricks import ( + "log" + "reflect" + "strings" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "log" - "reflect" - "strings" ) func resourceMWSNetworks() *schema.Resource { diff --git a/databricks/resource_databricks_mws_networks_mws_test.go b/databricks/resource_databricks_mws_networks_mws_test.go index 2236f10d9..8bce801be 100644 --- a/databricks/resource_databricks_mws_networks_mws_test.go +++ b/databricks/resource_databricks_mws_networks_mws_test.go @@ -3,11 +3,12 @@ package databricks import ( "errors" "fmt" + "os" + "testing" + 
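Editor's note: both `...JobResourceExists` helpers drop the unused `*testing.T` parameter because a `resource.TestCheckFunc` reports failures through its returned error, so the closure never needs `t`. A hedged sketch of that closure shape, with a stub state type standing in for `terraform.State`.

```go
package main

import (
	"errors"
	"fmt"
)

// Minimal stand-ins for terraform.State and resource.TestCheckFunc,
// for illustration only.
type state struct{ resources map[string]string }
type checkFunc func(*state) error

// resourceExists returns a closure that looks the resource up in state and
// copies its ID into out. Problems flow back through the returned error, so
// no *testing.T parameter is needed.
func resourceExists(name string, out *string) checkFunc {
	return func(s *state) error {
		id, ok := s.resources[name]
		if !ok {
			return fmt.Errorf("not found in state: %s", name)
		}
		if id == "" {
			return errors.New("resource id is not set")
		}
		*out = id
		return nil
	}
}

func main() {
	s := &state{resources: map[string]string{"databricks_job.my_job": "123"}}
	var id string
	if err := resourceExists("databricks_job.my_job", &id)(s); err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("found job id:", id)
}
```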
"github.com/databrickslabs/databricks-terraform/client/model" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" - "os" - "testing" ) func TestAccMWSNetworks(t *testing.T) { diff --git a/databricks/resource_databricks_mws_storage_configurations.go b/databricks/resource_databricks_mws_storage_configurations.go index a6c09168f..19ff4c362 100644 --- a/databricks/resource_databricks_mws_storage_configurations.go +++ b/databricks/resource_databricks_mws_storage_configurations.go @@ -1,10 +1,11 @@ package databricks import ( - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceMWSStorageConfigurations() *schema.Resource { diff --git a/databricks/resource_databricks_mws_storage_configurations_mws_test.go b/databricks/resource_databricks_mws_storage_configurations_mws_test.go index 41674cace..04eb7e07c 100644 --- a/databricks/resource_databricks_mws_storage_configurations_mws_test.go +++ b/databricks/resource_databricks_mws_storage_configurations_mws_test.go @@ -3,11 +3,12 @@ package databricks import ( "errors" "fmt" + "os" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" - "os" - "testing" ) func TestAccMWSStorageConfigurations(t *testing.T) { diff --git a/databricks/resource_databricks_mws_workspaces.go b/databricks/resource_databricks_mws_workspaces.go index 0345ff21d..2f22cea10 100644 --- a/databricks/resource_databricks_mws_workspaces.go +++ b/databricks/resource_databricks_mws_workspaces.go @@ -3,11 +3,12 @@ package databricks import ( "bytes" "fmt" + "reflect" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" - "reflect" "log" "strconv" diff --git a/databricks/resource_databricks_mws_workspaces_mws_test.go b/databricks/resource_databricks_mws_workspaces_mws_test.go index 3e79d6a5c..fc301273c 100644 --- a/databricks/resource_databricks_mws_workspaces_mws_test.go +++ b/databricks/resource_databricks_mws_workspaces_mws_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" "os" "strconv" "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func TestAccMWSWorkspaces(t *testing.T) { diff --git a/databricks/resource_databricks_notebook.go b/databricks/resource_databricks_notebook.go index 13ed636c2..75cca301f 100644 --- a/databricks/resource_databricks_notebook.go +++ b/databricks/resource_databricks_notebook.go @@ -196,7 +196,6 @@ func convertBase64ToCheckSum(b64 string) (string, error) { return strconv.Itoa(int(crc32.ChecksumIEEE(dataArr))), nil } return checksum, nil - } func convertZipBytesToCRC(b64 []byte) (string, error) { @@ -221,7 +220,7 @@ func convertZipBytesToCRC(b64 []byte) (string, error) { return strconv.Itoa(int(totalSum)), nil } 
-func getDBCCheckSumForCommands(fileIO io.ReadCloser) (int, error) { +func getDBCCheckSumForCommands(fileIO io.Reader) (int, error) { var stringBuff bytes.Buffer scanner := bufio.NewScanner(fileIO) buf := make([]byte, 0, 64*1024) diff --git a/databricks/resource_databricks_scim_group.go b/databricks/resource_databricks_scim_group.go index e674529fc..9a2e4dd62 100644 --- a/databricks/resource_databricks_scim_group.go +++ b/databricks/resource_databricks_scim_group.go @@ -2,11 +2,12 @@ package databricks import ( "fmt" + "log" + "strings" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "log" - "strings" ) func resourceScimGroup() *schema.Resource { diff --git a/databricks/resource_databricks_scim_group_aws_test.go b/databricks/resource_databricks_scim_group_aws_test.go index bd7f342d4..934d112db 100644 --- a/databricks/resource_databricks_scim_group_aws_test.go +++ b/databricks/resource_databricks_scim_group_aws_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAwsScimGroupResource(t *testing.T) { diff --git a/databricks/resource_databricks_scim_group_azure_test.go b/databricks/resource_databricks_scim_group_azure_test.go index 131fff685..f2936e406 100644 --- a/databricks/resource_databricks_scim_group_azure_test.go +++ b/databricks/resource_databricks_scim_group_azure_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAzureScimGroupResource(t *testing.T) { diff --git a/databricks/resource_databricks_scim_user.go b/databricks/resource_databricks_scim_user.go index 61e0687a9..a55d7b90e 100644 --- a/databricks/resource_databricks_scim_user.go +++ b/databricks/resource_databricks_scim_user.go @@ -2,13 +2,14 @@ package databricks import ( "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "reflect" "sort" "strings" + + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceScimUser() *schema.Resource { @@ -209,7 +210,6 @@ func resourceScimUserUpdate(d *schema.ResourceData, m interface{}) error { displayName = rDisplayName.(string) } if rRoles, ok := d.GetOk("roles"); ok { - roles = convertInterfaceSliceToStringSlice(rRoles.(*schema.Set).List()) } if rEntitlements, ok := d.GetOk("entitlements"); ok { diff --git a/databricks/resource_databricks_scim_user_aws_test.go b/databricks/resource_databricks_scim_user_aws_test.go index e2c9bb766..c195e5aa1 100644 --- a/databricks/resource_databricks_scim_user_aws_test.go +++ b/databricks/resource_databricks_scim_user_aws_test.go @@ -3,12 +3,13 @@ package databricks 
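Editor's note: changing `getDBCCheckSumForCommands` to take an `io.Reader` keeps existing call sites working (every `io.ReadCloser` is an `io.Reader`) while letting tests feed it an in-memory reader with no Close method. The sketch below is in the spirit of that function, not a copy of it: it scans any reader line by line and folds the lines into one CRC32 value.

```go
package main

import (
	"bufio"
	"fmt"
	"hash/crc32"
	"io"
	"strings"
)

// checksumLines scans any io.Reader line by line and reduces the lines to a
// single CRC32 value. Accepting io.Reader (not io.ReadCloser) is what lets
// main feed it an in-memory strings.Reader below.
func checksumLines(r io.Reader) (int, error) {
	var buf strings.Builder
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		buf.WriteString(scanner.Text())
		buf.WriteString("\n")
	}
	if err := scanner.Err(); err != nil {
		return 0, err
	}
	return int(crc32.ChecksumIEEE([]byte(buf.String()))), nil
}

func main() {
	sum, err := checksumLines(strings.NewReader("command 1\ncommand 2\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
}
```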
import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAwsScimUserResource(t *testing.T) { diff --git a/databricks/resource_databricks_scim_user_azure_test.go b/databricks/resource_databricks_scim_user_azure_test.go index eae4c2d86..80bb97475 100644 --- a/databricks/resource_databricks_scim_user_azure_test.go +++ b/databricks/resource_databricks_scim_user_azure_test.go @@ -4,14 +4,15 @@ import ( "bytes" "errors" "fmt" + "log" + "reflect" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "log" - "reflect" - "testing" ) func TestAccAzureScimUserResource(t *testing.T) { diff --git a/databricks/resource_databricks_secret_acl.go b/databricks/resource_databricks_secret_acl.go index e2ffff41f..474298f18 100644 --- a/databricks/resource_databricks_secret_acl.go +++ b/databricks/resource_databricks_secret_acl.go @@ -2,11 +2,12 @@ package databricks import ( "fmt" + "log" + "strings" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "log" - "strings" ) func resourceSecretACL() *schema.Resource { @@ -39,8 +40,8 @@ func getSecretACLID(scope string, key string) (string, error) { return scope + "|||" + key, nil } -func getScopeAndKeyFromSecretACLID(SecretACLIDString string) (string, string, error) { - return strings.Split(SecretACLIDString, "|||")[0], strings.Split(SecretACLIDString, "|||")[1], nil +func getScopeAndKeyFromSecretACLID(secretACLIDString string) (string, string, error) { + return strings.Split(secretACLIDString, "|||")[0], strings.Split(secretACLIDString, "|||")[1], nil } func resourceSecretACLCreate(d *schema.ResourceData, m interface{}) error { diff --git a/databricks/resource_databricks_secret_acl_aws_test.go b/databricks/resource_databricks_secret_acl_aws_test.go index de6f8f0ed..fd51ec68c 100644 --- a/databricks/resource_databricks_secret_acl_aws_test.go +++ b/databricks/resource_databricks_secret_acl_aws_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAwsSecretAclResource(t *testing.T) { diff --git a/databricks/resource_databricks_secret_acl_azure_test.go b/databricks/resource_databricks_secret_acl_azure_test.go index 2536a390c..a480dd776 100644 --- a/databricks/resource_databricks_secret_acl_azure_test.go +++ b/databricks/resource_databricks_secret_acl_azure_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" 
"github.com/stretchr/testify/assert" - "testing" ) func TestAccAzureSecretAclResource(t *testing.T) { diff --git a/databricks/resource_databricks_secret_aws_test.go b/databricks/resource_databricks_secret_aws_test.go index 2fccc0dc2..71202bfe0 100644 --- a/databricks/resource_databricks_secret_aws_test.go +++ b/databricks/resource_databricks_secret_aws_test.go @@ -103,7 +103,6 @@ func testAwsSecretResourceDestroy(s *terraform.State) error { if err == nil { return errors.New("resource secret is not cleaned up") } - } return nil } diff --git a/databricks/resource_databricks_secret_azure_test.go b/databricks/resource_databricks_secret_azure_test.go index 9a759694a..36c72dad9 100644 --- a/databricks/resource_databricks_secret_azure_test.go +++ b/databricks/resource_databricks_secret_azure_test.go @@ -103,7 +103,6 @@ func testAzureSecretResourceDestroy(s *terraform.State) error { if err == nil { return errors.New("resource secret is not cleaned up") } - } return nil } diff --git a/databricks/resource_databricks_secret_scope.go b/databricks/resource_databricks_secret_scope.go index 4327f7b33..22e4201f8 100644 --- a/databricks/resource_databricks_secret_scope.go +++ b/databricks/resource_databricks_secret_scope.go @@ -2,10 +2,11 @@ package databricks import ( "fmt" - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) func resourceSecretScope() *schema.Resource { diff --git a/databricks/resource_databricks_secret_scope_aws_test.go b/databricks/resource_databricks_secret_scope_aws_test.go index 2379c15c2..8c3a25148 100644 --- a/databricks/resource_databricks_secret_scope_aws_test.go +++ b/databricks/resource_databricks_secret_scope_aws_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAwsSecretScopeResource(t *testing.T) { diff --git a/databricks/resource_databricks_secret_scope_azure_test.go b/databricks/resource_databricks_secret_scope_azure_test.go index 004034f67..15c933146 100644 --- a/databricks/resource_databricks_secret_scope_azure_test.go +++ b/databricks/resource_databricks_secret_scope_azure_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAzureSecretScopeResource(t *testing.T) { diff --git a/databricks/resource_databricks_token.go b/databricks/resource_databricks_token.go index bb3fdc467..3147d4fa4 100644 --- a/databricks/resource_databricks_token.go +++ b/databricks/resource_databricks_token.go @@ -2,10 +2,11 @@ package databricks import ( "fmt" - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "strings" + + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" 
) func resourceToken() *schema.Resource { diff --git a/databricks/resource_databricks_token_aws_test.go b/databricks/resource_databricks_token_aws_test.go index 917155911..7f3888766 100644 --- a/databricks/resource_databricks_token_aws_test.go +++ b/databricks/resource_databricks_token_aws_test.go @@ -3,13 +3,14 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAwsTokenResource(t *testing.T) { diff --git a/databricks/resource_databricks_token_azure_test.go b/databricks/resource_databricks_token_azure_test.go index bb2c6c70b..455dd1ba0 100644 --- a/databricks/resource_databricks_token_azure_test.go +++ b/databricks/resource_databricks_token_azure_test.go @@ -3,13 +3,14 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAzureTokenResource(t *testing.T) { diff --git a/databricks/utils.go b/databricks/utils.go index 936bdda78..931d61069 100644 --- a/databricks/utils.go +++ b/databricks/utils.go @@ -67,7 +67,6 @@ func changeClusterIntoRunningState(clusterID string, client *service.DBApiClient } return fmt.Errorf("cluster is in a non recoverable state: %s", currentState) - } func isClusterMissing(errorMsg, resourceID string) bool { diff --git a/main.go b/main.go index 1d327a215..329203fbd 100644 --- a/main.go +++ b/main.go @@ -1,10 +1,11 @@ package main import ( + "log" + "github.com/databrickslabs/databricks-terraform/databricks" "github.com/hashicorp/terraform-plugin-sdk/plugin" "github.com/hashicorp/terraform-plugin-sdk/terraform" - "log" ) var ( From aaf8f89e21f1b4800e3d709f60dab1b895209266 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 08:26:44 -0400 Subject: [PATCH 07/13] updated to remove fmt from lint command so you will have to now run make fmt manually so build step in travis catches the error --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 081b1aea4..42d0a209f 100644 --- a/Makefile +++ b/Makefile @@ -30,8 +30,8 @@ build: lint test @echo "==> Building source code with go build..." @go build -mod vendor -v -o terraform-provider-databricks -lint: fmt - @echo "==> Linting source code with golangci-lint..." +lint: + @echo "==> Linting source code with golangci-lint make sure you run make fmt ..." 
@golangci-lint run --skip-dirs-use-default --timeout 5m fmt: From fa5daa7721770a28b75e5cf530351b04f56a785f Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 08:31:53 -0400 Subject: [PATCH 08/13] fixed vendor and added the required vendor files to successfully run the builds --- .../testify/assert/assertion_compare.go | 274 ++ vendor/gopkg.in/yaml.v3/.travis.yml | 16 + vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 746 ++++ vendor/gopkg.in/yaml.v3/decode.go | 931 +++++ vendor/gopkg.in/yaml.v3/emitterc.go | 1992 +++++++++++ vendor/gopkg.in/yaml.v3/encode.go | 561 +++ vendor/gopkg.in/yaml.v3/go.mod | 5 + vendor/gopkg.in/yaml.v3/parserc.go | 1229 +++++++ vendor/gopkg.in/yaml.v3/readerc.go | 434 +++ vendor/gopkg.in/yaml.v3/resolve.go | 326 ++ vendor/gopkg.in/yaml.v3/scannerc.go | 3025 +++++++++++++++++ vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 662 ++++ vendor/gopkg.in/yaml.v3/yamlh.go | 805 +++++ vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 ++ 19 files changed, 11599 insertions(+) create mode 100644 vendor/github.com/stretchr/testify/assert/assertion_compare.go create mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/go.mod create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go new file mode 100644 index 000000000..dc200395c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -0,0 +1,274 @@ +package assert + +import ( + "fmt" + "reflect" +) + +type CompareType int + +const ( + compareLess CompareType = iota - 1 + compareEqual + compareGreater +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { + switch kind { + case reflect.Int: + { + intobj1 := obj1.(int) + intobj2 := obj2.(int) + if intobj1 > intobj2 { + return compareGreater, true + } + if intobj1 == intobj2 { + return compareEqual, true + } + if intobj1 < intobj2 { + return compareLess, true + } + } + case reflect.Int8: + { + int8obj1 := obj1.(int8) + int8obj2 := obj2.(int8) + if int8obj1 > int8obj2 { + return compareGreater, true + } + if int8obj1 == int8obj2 { + return compareEqual, true + } + if int8obj1 < int8obj2 { + return compareLess, true + } + } + case reflect.Int16: + { + int16obj1 := obj1.(int16) + int16obj2 := obj2.(int16) + if int16obj1 > int16obj2 { + return compareGreater, true + } + if int16obj1 == int16obj2 { + return compareEqual, true + } + if int16obj1 < int16obj2 { + return compareLess, true 
+ } + } + case reflect.Int32: + { + int32obj1 := obj1.(int32) + int32obj2 := obj2.(int32) + if int32obj1 > int32obj2 { + return compareGreater, true + } + if int32obj1 == int32obj2 { + return compareEqual, true + } + if int32obj1 < int32obj2 { + return compareLess, true + } + } + case reflect.Int64: + { + int64obj1 := obj1.(int64) + int64obj2 := obj2.(int64) + if int64obj1 > int64obj2 { + return compareGreater, true + } + if int64obj1 == int64obj2 { + return compareEqual, true + } + if int64obj1 < int64obj2 { + return compareLess, true + } + } + case reflect.Uint: + { + uintobj1 := obj1.(uint) + uintobj2 := obj2.(uint) + if uintobj1 > uintobj2 { + return compareGreater, true + } + if uintobj1 == uintobj2 { + return compareEqual, true + } + if uintobj1 < uintobj2 { + return compareLess, true + } + } + case reflect.Uint8: + { + uint8obj1 := obj1.(uint8) + uint8obj2 := obj2.(uint8) + if uint8obj1 > uint8obj2 { + return compareGreater, true + } + if uint8obj1 == uint8obj2 { + return compareEqual, true + } + if uint8obj1 < uint8obj2 { + return compareLess, true + } + } + case reflect.Uint16: + { + uint16obj1 := obj1.(uint16) + uint16obj2 := obj2.(uint16) + if uint16obj1 > uint16obj2 { + return compareGreater, true + } + if uint16obj1 == uint16obj2 { + return compareEqual, true + } + if uint16obj1 < uint16obj2 { + return compareLess, true + } + } + case reflect.Uint32: + { + uint32obj1 := obj1.(uint32) + uint32obj2 := obj2.(uint32) + if uint32obj1 > uint32obj2 { + return compareGreater, true + } + if uint32obj1 == uint32obj2 { + return compareEqual, true + } + if uint32obj1 < uint32obj2 { + return compareLess, true + } + } + case reflect.Uint64: + { + uint64obj1 := obj1.(uint64) + uint64obj2 := obj2.(uint64) + if uint64obj1 > uint64obj2 { + return compareGreater, true + } + if uint64obj1 == uint64obj2 { + return compareEqual, true + } + if uint64obj1 < uint64obj2 { + return compareLess, true + } + } + case reflect.Float32: + { + float32obj1 := obj1.(float32) + float32obj2 := obj2.(float32) + if float32obj1 > float32obj2 { + return compareGreater, true + } + if float32obj1 == float32obj2 { + return compareEqual, true + } + if float32obj1 < float32obj2 { + return compareLess, true + } + } + case reflect.Float64: + { + float64obj1 := obj1.(float64) + float64obj2 := obj2.(float64) + if float64obj1 > float64obj2 { + return compareGreater, true + } + if float64obj1 == float64obj2 { + return compareEqual, true + } + if float64obj1 < float64obj2 { + return compareLess, true + } + } + case reflect.String: + { + stringobj1 := obj1.(string) + stringobj2 := obj2.(string) + if stringobj1 > stringobj2 { + return compareGreater, true + } + if stringobj1 == stringobj2 { + return compareEqual, true + } + if stringobj1 < stringobj2 { + return compareLess, true + } + } + } + + return compareEqual, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs 
...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) +} + +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + compareResult, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if !containsValue(allowedComparesResults, compareResult) { + return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + } + + return true +} + +func containsValue(values []CompareType, value CompareType) bool { + for _, v := range values { + if v == value { + return true + } + } + + return false +} diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml new file mode 100644 index 000000000..04d4dae09 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" + +go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
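Editor's note: the vendored `assertion_compare.go` above adds ordered assertions that first require both operands to share the same `reflect.Kind` and then compare them. A short illustrative test showing how they read; the package and test names are made up for this example.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Illustrative test only: exercises the ordered assertions added in
// assertion_compare.go. Mixing kinds (e.g. int vs int64) fails with
// "Elements should be the same type" instead of comparing.
func TestOrderedAssertions(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, float64(1), float64(2))
	assert.LessOrEqual(t, int64(2), int64(2))
}
```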
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md new file mode 100644 index 000000000..08eb1babd --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +and offers backwards +compatibility with YAML 1.1 in some cases. +1.2, including support for +anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v3*. + +To install it, run: + + go get gopkg.in/yaml.v3 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) + +API stability +------------- + +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 000000000..65846e674 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,746 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. 
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 000000000..be63169b7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,931 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
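+//
+// For illustration only: the node-building methods below drive peek and
+// expect together, peeking until the closing event arrives and then
+// consuming it, roughly:
+//
+//	for p.peek() != yaml_SEQUENCE_END_EVENT {
+//		p.parseChild(n) // recursively build the child nodes
+//	}
+//	p.expect(yaml_SEQUENCE_END_EVENT) // consume the closing event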
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + return &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + Line: p.event.start_mark.line + 1, + Column: p.event.start_mark.column + 1, + HeadComment: string(p.event.head_comment), + LineComment: string(p.event.line_comment), + FootComment: string(p.event.foot_comment), + } +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + 
p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) 
+ return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
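+		// For example (illustrative arithmetic only): at decodeCount = 2,200,000 the
+		// allowed ratio is 0.99 - 0.89*((2,200,000-400,000)/3,600,000) = 0.545, i.e.
+		// a little over half of the decode operations may come from alias expansion.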
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
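+		// In v3 a time.Duration target only accepts strings understood by
+		// time.ParseDuration; bare numbers are rejected below. Illustrative
+		// sketch using the public API (field and key names assumed):
+		//
+		//	var c struct{ Timeout time.Duration }
+		//	yaml.Unmarshal([]byte("timeout: 250ms"), &c) // ok: 250ms
+		//	yaml.Unmarshal([]byte("timeout: 250"), &c)   // type error in v3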
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + d.merge(n.Content[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + if n.Content[i].ShortTag() != strTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + d.merge(n.Content[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = 
out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *Node, out reflect.Value) { + switch n.Kind { + case MappingNode: + d.unmarshal(n, out) + case AliasNode: + if n.Alias != nil && n.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(n, out) + case SequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.Content) - 1; i >= 0; i-- { + ni := n.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 000000000..ab2a06619 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,1992 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + // [Go] If inside a block sequence item, discount the space taken by the indicator. + if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + emitter.indent -= 2 + } + } + return true +} + +// State dispatcher. 
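+//
+// As a rough illustration of what feeds these states, marshalling a small
+// mapping through the public API emits STREAM-START, DOCUMENT-START,
+// MAPPING-START, one SCALAR per key and per value, MAPPING-END,
+// DOCUMENT-END and STREAM-END:
+//
+//	out, _ := yaml.Marshal(map[string]int{"a": 1})
+//	fmt.Print(string(out)) // "a: 1\n"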
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
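+//
+// Flow sequences render on a single line as "[a, b, c]". For illustration,
+// a sequence node explicitly marked with FlowStyle takes this path
+// (public API, output approximate):
+//
+//	n := &yaml.Node{Kind: yaml.SequenceNode, Style: yaml.FlowStyle, Content: []*yaml.Node{
+//		{Kind: yaml.ScalarNode, Value: "a"},
+//		{Kind: yaml.ScalarNode, Value: "b"},
+//	}}
+//	out, _ := yaml.Marshal(n) // roughly "[a, b]\n"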
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
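+//
+// The flow form of a mapping is "{key: value, ...}". For illustration, a
+// mapping node with FlowStyle set marshals roughly as:
+//
+//	n := &yaml.Node{Kind: yaml.MappingNode, Style: yaml.FlowStyle, Content: []*yaml.Node{
+//		{Kind: yaml.ScalarNode, Value: "a"},
+//		{Kind: yaml.ScalarNode, Value: "1"},
+//	}}
+//	out, _ := yaml.Marshal(n) // roughly "{a: 1}\n"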
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
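+//
+// Block sequence items are written one per line as "- " followed by the
+// item. For illustration (public API):
+//
+//	out, _ := yaml.Marshal([]string{"a", "b"})
+//	fmt.Print(string(out))
+//	// Output:
+//	// - a
+//	// - b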
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + // [Go] The original logic here would not indent the sequence when inside a mapping. + // In Go we always indent it, but take the sequence indicator out of the indentation. + indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) + original := emitter.indent + if !yaml_emitter_increase_indent(emitter, false, indentless) { + return false + } + if emitter.indent > original+2 { + emitter.indent -= 2 + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a node. 
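+//
+// The event type selects the handler below: ALIAS, SCALAR, SEQUENCE-START
+// or MAPPING-START. As an illustration of the alias path (public API,
+// output approximate), an anchored node referenced a second time marshals
+// roughly as:
+//
+//	s := &yaml.Node{Kind: yaml.ScalarNode, Value: "shared", Anchor: "x"}
+//	doc := &yaml.Node{Kind: yaml.SequenceNode, Content: []*yaml.Node{
+//		s,
+//		{Kind: yaml.AliasNode, Alias: s, Value: "x"},
+//	}}
+//	out, _ := yaml.Marshal(doc) // roughly "- &x shared\n- *x\n"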
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
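+//
+// An empty mapping is forced into flow style by the emitter so that it can
+// be written as "{}". For illustration (public API):
+//
+//	out, _ := yaml.Marshal(map[string]int{})
+//	fmt.Print(string(out)) // "{}\n"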
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
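+//
+// Tags are written either in shorthand form, handle plus suffix (e.g.
+// "!!str"), or verbatim as "!<...>" when no directive prefix matches. For
+// illustration, an explicitly tagged scalar marshals roughly as:
+//
+//	n := &yaml.Node{Kind: yaml.ScalarNode, Style: yaml.TaggedStyle, Tag: "!!str", Value: "123"}
+//	out, _ := yaml.Marshal(n) // roughly "!!str 123\n"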
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + if len(emitter.line_comment) == 0 { + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. 
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + 
return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 000000000..1f37271ce --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,561 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
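For reference, a minimal standalone sketch of how the literal block scalar path written by yaml_emitter_write_literal_scalar above surfaces through the public yaml.v3 API; the struct and field names are illustrative only and are not part of this patch, and the expected output assumes the encoder's default 4-space indent.

// Illustrative sketch, not part of the vendored file: multiline strings
// are emitted with the literal block style handled above.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// config is a hypothetical type used only for this example.
type config struct {
	Script string `yaml:"script"`
}

func main() {
	out, err := yaml.Marshal(config{Script: "echo build\necho test\n"})
	if err != nil {
		panic(err)
	}
	// Expected shape of the output (default indent is 4):
	//   script: |
	//       echo build
	//       echo test
	fmt.Print(string(out))
}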
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) 
fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
+ implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var rtag string + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ = resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. 
+ var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + } +} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod new file mode 100644 index 000000000..f407ea321 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v3" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 000000000..aea9050b8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1229 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
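For reference, a small hedged sketch of how the node encoder above carries HeadComment/LineComment values from yaml.Node into the emitter's comment events; the node values below are illustrative only and not part of this patch.

// Illustrative sketch, not part of the vendored file: marshalling a
// yaml.Node mapping whose key carries a head comment and whose value
// carries a line comment.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "name", HeadComment: "display name"},
			{Kind: yaml.ScalarNode, Value: "demo", LineComment: "default"},
		},
	}
	out, err := yaml.Marshal(&doc)
	if err != nil {
		panic(err)
	}
	// Expected shape of the output:
	//   # display name
	//   name: demo # default
	fmt.Print(string(out))
}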
+ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. 
+ *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return 
yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. 
+ var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? 
+// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow 
node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head := len(parser.head_comment) + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + // [Go] It's a sequence under a sequence entry, so the former head comment + // is for the list itself, not the first list item under it. + parser.stem_comment = parser.head_comment[:prior_head] + if len(parser.head_comment) == prior_head { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) + } + + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't 
this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. 
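+	// Illustrative note (hypothetical caller, not upstream code): without the
+	// copy below, the stored directive would share its backing arrays with the
+	// caller's slices, e.g.
+	//
+	//	d := yaml_tag_directive_t{handle: tok.value, prefix: tok.prefix}
+	//	yaml_parser_append_tag_directive(parser, d, false, tok.start_mark)
+	//	tok.value[0] = 'x' // would also change parser.tag_directives[...].handle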
+ value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 000000000..b7de0a89c --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. 
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. 
+ // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. 
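+				// Worked example of the surrogate formulas above (illustrative,
+				// not part of the original note): U+1F600 has U' = 0x0F600, so
+				//
+				//	W1 = 0xD800 + (U' >> 10)   = 0xD83D
+				//	W2 = 0xDC00 + (U' & 0x3FF) = 0xDE00
+				//
+				// and decoding recombines them as
+				//	0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF) = 0x1F600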
+ if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" 
+ tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. 
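+			// Illustrative examples of the plain-scalar resolution above and
+			// below (a sketch, not an exhaustive list):
+			//
+			//	"26"         -> !!int   26     (ParseInt, base 0)
+			//	"1_000"      -> !!int   1000   (underscores stripped first)
+			//	"0x1A"       -> !!int   26     (hex via base-0 parsing)
+			//	"3.14"       -> !!float 3.14   (yamlStyleFloat fallback)
+			//	"2001-12-15" -> !!timestamp    (untagged or !!timestamp values only)
+			//	"0o14"/"014" -> !!int   12     (1.2 and legacy 1.1 octals, handled next)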
+ if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go new file mode 100644 index 000000000..57e954ca5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -0,0 +1,3025 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. 
+// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. 
Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? 
a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) 
+ parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. 
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? 
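+	// (An anchor is written '&name' and attaches a name to the node that
+	// follows it; an alias '*name' refers back to that node. Both are
+	// handled by the same fetcher below, differing only in the token type.)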
+ if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." 
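+	// In other words: if the scanner has already moved to a later line, or
+	// more than 1024 characters past the recorded start of the potential
+	// key, without seeing the ':', the saved position can no longer become
+	// an implicit key.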
+ // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. 
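+	// (Indentation is not significant inside flow collections such as
+	// "[a, b]" or "{k: v}", so no indentation level is pushed and no
+	// BLOCK-SEQUENCE-START or BLOCK-MAPPING-START token is emitted there.)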
+ if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. 
+ if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. 
+ yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. 
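+		// For a plain mapping line such as
+		//
+		//     foo: bar
+		//
+		// the scalar "foo" has already been appended to the token queue;
+		// reaching the ':' now inserts the KEY token (and, if needed,
+		// BLOCK-MAPPING-START) back at the position recorded when "foo"
+		// was saved as a potential simple key.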
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. 
+ parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. 
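+	// For example, "%YAML 1.1" scans the name "YAML" and stops at the blank;
+	// an input such as "%YAML:1.1" fails here, because ':' is neither part
+	// of the name nor a blank.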
+ if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. 
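+	// For a directive such as
+	//
+	//     %TAG !e! tag:example.com,2000:app/
+	//
+	// the handle scanned above is "!e!" and the prefix scanned here is
+	// "tag:example.com,2000:app/".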
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. 
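+			// (That is, for a bare "!" the suffix scanned above is empty;
+			// after the swap the handle is "" and the suffix is "!".)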
+ if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
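+			// (For example, the header "|+2" selects literal style, "keep"
+			// chomping and an explicit indentation indicator of 2; the
+			// branch below handles the reverse order, such as "|2+".)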
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + // TODO Test this and then re-enable it. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. 
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
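+		// (A quoted scalar may span several lines, but it must be closed
+		// before the end of the stream; reaching EOF here means the closing
+		// quote is missing.)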
+ if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
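+					// (For example, "\u00E9" yields the value 0xE9 and is
+					// written as the two UTF-8 bytes 0xC3 0xA9, while
+					// "\U0001F600" yields 0x1F600 and is written as four
+					// bytes.)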
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else { + if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = append(text, parser.buffer[parser.buffer_pos]) + } + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + // Got line break or terminator. + if !recent_empty { + if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + if len(text) > 0 { + if start_mark.column-1 < parser.indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else { + if parser.mark.index >= seen { + text = append(text, parser.buffer[parser.buffer_pos]) + } + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
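The two scanner helpers above collect comment text and remember whether it sits on a token's own line, above it, or below it. At the public API level that bookkeeping surfaces as the HeadComment, LineComment and FootComment fields of yaml.Node, declared later in this patch. A minimal, hedged sketch of reading a head comment back out; the exact node a comment attaches to is an assumption here, not something this diff spells out:

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    func main() {
    	var n yaml.Node
    	if err := yaml.Unmarshal([]byte("# above the key\nkey: value\n"), &n); err != nil {
    		panic(err)
    	}
    	root := &n
    	if root.Kind == yaml.DocumentNode && len(root.Content) > 0 {
    		root = root.Content[0] // step from the document node to the mapping
    	}
    	if len(root.Content) > 0 {
    		// Assumed: the head comment is attached to the first key node.
    		fmt.Println(root.Content[0].HeadComment)
    	}
    }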
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
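keyList above satisfies sort.Interface, and its Less method is what gives the encoder a stable, human-friendly ordering for map keys: digit runs inside strings compare by numeric value rather than character by character. A small sketch of how that ordering shows up through Marshal; the exact output is inferred from Less above, so treat it as an expectation rather than a guarantee:

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    func main() {
    	m := map[string]int{"item10": 10, "item2": 2, "item1": 1}
    	out, err := yaml.Marshal(m)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out))
    	// Expected, given the numeric-aware comparison above:
    	// item1: 1
    	// item2: 2
    	// item10: 10
    }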
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 000000000..b8a116bf9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 000000000..b5d35a50d --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,662 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decorder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
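Since the Unmarshal and Decoder documentation above covers both strict field checking and multi-document streams, here is a small, hedged usage sketch tying those pieces together; the struct and input are illustrative only:

    package main

    import (
    	"errors"
    	"fmt"
    	"io"
    	"strings"

    	"gopkg.in/yaml.v3"
    )

    func main() {
    	in := "a: 1\n---\na: 2\n"
    	dec := yaml.NewDecoder(strings.NewReader(in))
    	dec.KnownFields(true) // unknown keys become decode errors
    	for {
    		var doc struct {
    			A int `yaml:"a"`
    		}
    		err := dec.Decode(&doc)
    		if errors.Is(err, io.EOF) {
    			break // no more documents in the stream
    		}
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(doc.A) // 1, then 2
    	}
    }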
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. 
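The Marshal documentation above enumerates the omitempty, flow and inline tag options; the following hedged sketch exercises them together with the Encoder introduced just above. Type and field names are made up for illustration:

    package main

    import (
    	"os"

    	"gopkg.in/yaml.v3"
    )

    type Address struct {
    	City string `yaml:"city"`
    	Zip  string `yaml:"zip,omitempty"` // dropped while empty
    }

    type Person struct {
    	Name    string   `yaml:"name"`
    	Tags    []string `yaml:"tags,flow"` // emitted in flow style: [go, yaml]
    	Address Address  `yaml:",inline"`   // fields promoted into the outer mapping
    	Secret  string   `yaml:"-"`         // never marshalled
    }

    func main() {
    	enc := yaml.NewEncoder(os.Stdout)
    	enc.SetIndent(2)
    	defer enc.Close()
    	p := Person{Name: "x", Tags: []string{"go", "yaml"}, Address: Address{City: "Ghent"}}
    	if err := enc.Encode(p); err != nil {
    		panic(err)
    	}
    }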
+func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +// +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. 
+ Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
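As a small illustration of the IsZeroer contract described in the comment above (the interface declaration itself follows), a field type that reports itself zero is skipped by omitempty even though it is a non-empty struct value; time.Time is the notable standard-library case. The sketch is illustrative only:

    package main

    import (
    	"fmt"
    	"time"

    	"gopkg.in/yaml.v3"
    )

    type Event struct {
    	Name string    `yaml:"name"`
    	At   time.Time `yaml:"at,omitempty"` // omitted while At.IsZero() reports true
    }

    func main() {
    	out, err := yaml.Marshal(Event{Name: "deploy"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out)) // expected: "name: deploy\n"
    }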
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 000000000..2719cfbb0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,805 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. 
+ yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+ yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. 
+ yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case 
yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. 
+} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? 
+ best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go new file mode 100644 index 000000000..e88f9c54a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. 
+func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} From 511d3c330c1363eb3763c6849d35458323436c8c Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 09:01:47 -0400 Subject: [PATCH 09/13] removed deprecation notice as it is invalid --- databricks/resource_databricks_group.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/databricks/resource_databricks_group.go b/databricks/resource_databricks_group.go index b8d502619..ee51b67ab 100644 --- a/databricks/resource_databricks_group.go +++ b/databricks/resource_databricks_group.go @@ -21,12 +21,10 @@ func resourceGroup() *schema.Resource { Required: true, }, "allow_cluster_create": { - Deprecated: "Will be deprecated in a future release for general permissions api", Type: schema.TypeBool, Optional: true, }, "allow_instance_pool_create": { - Deprecated: "Will be deprecated in a future release for general permissions api", Type: schema.TypeBool, Optional: true, }, From 76afb2e1890471fa442343395eeebdf063e71aef Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 11:17:18 -0400 Subject: [PATCH 10/13] renamed group_role to group_instance_profile --- databricks/provider.go | 22 ++-- ...ource_databricks_group_instance_profile.go | 120 ++++++++++++++++++ ...bricks_group_instance_profile_aws_test.go} | 28 ++-- databricks/resource_databricks_group_role.go | 120 ------------------ 4 files changed, 145 insertions(+), 145 deletions(-) create mode 100644 databricks/resource_databricks_group_instance_profile.go rename databricks/{resource_databricks_group_role_aws_test.go => resource_databricks_group_instance_profile_aws_test.go} (75%) delete mode 100644 databricks/resource_databricks_group_role.go diff --git a/databricks/provider.go b/databricks/provider.go index 45098ea82..05f2ab497 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -34,17 +34,17 @@ func Provider(version string) terraform.ResourceProvider { "databricks_scim_user": resourceScimUser(), "databricks_scim_group": resourceScimGroup(), // Scim Group is split into multiple components for flexibility to pick and choose - "databricks_group": resourceGroup(), - "databricks_group_role": resourceGroupRole(), - "databricks_group_member": resourceGroupMember(), - "databricks_notebook": resourceNotebook(), - "databricks_cluster": resourceCluster(), - "databricks_job": resourceJob(), - "databricks_dbfs_file": resourceDBFSFile(), - "databricks_dbfs_file_sync": resourceDBFSFileSync(), - "databricks_instance_profile": resourceInstanceProfile(), - "databricks_aws_s3_mount": resourceAWSS3Mount(), - "databricks_azure_blob_mount": resourceAzureBlobMount(), + "databricks_group": resourceGroup(), + "databricks_group_instance_profile": resourceGroupInstanceProfile(), + "databricks_group_member": resourceGroupMember(), + "databricks_notebook": resourceNotebook(), + "databricks_cluster": resourceCluster(), + "databricks_job": resourceJob(), + "databricks_dbfs_file": resourceDBFSFile(), + "databricks_dbfs_file_sync": resourceDBFSFileSync(), + "databricks_instance_profile": resourceInstanceProfile(), + "databricks_aws_s3_mount": resourceAWSS3Mount(), + "databricks_azure_blob_mount": resourceAzureBlobMount(), "databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(), "databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(), // MWS (multiple workspaces) resources are only limited to AWS as azure already has a built in concept of MWS 
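Note (editorial, not part of the patch): the renamed databricks_group_instance_profile resource introduced in the file below stores its Terraform ID as the two components joined with a pipe, "<group_id>|<instance_profile_arn>". The following standalone Go sketch only illustrates that encoding; the type and helper names mirror the GroupInstanceProfileID helpers defined in resource_databricks_group_instance_profile.go, and all values shown are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// groupInstanceProfileID mirrors the composite ID used by the new resource:
// "<group_id>|<instance_profile_arn>".
type groupInstanceProfileID struct {
	GroupID           string
	InstanceProfileID string
}

// String joins the two components with the pipe delimiter, matching the
// format the resource writes with d.SetId().
func (g groupInstanceProfileID) String() string {
	return fmt.Sprintf("%s|%s", g.GroupID, g.InstanceProfileID)
}

// parseID splits a stored ID back into its two components.
func parseID(id string) groupInstanceProfileID {
	parts := strings.SplitN(id, "|", 2)
	return groupInstanceProfileID{GroupID: parts[0], InstanceProfileID: parts[1]}
}

func main() {
	// Hypothetical values for illustration only.
	id := groupInstanceProfileID{
		GroupID:           "example-group-id",
		InstanceProfileID: "arn:aws:iam::123456789012:instance-profile/example",
	}
	encoded := id.String()
	fmt.Println(encoded)          // example-group-id|arn:aws:iam::123456789012:instance-profile/example
	fmt.Println(parseID(encoded)) // round-trips back into the two components
}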
diff --git a/databricks/resource_databricks_group_instance_profile.go b/databricks/resource_databricks_group_instance_profile.go new file mode 100644 index 000000000..a069ab77f --- /dev/null +++ b/databricks/resource_databricks_group_instance_profile.go @@ -0,0 +1,120 @@ +package databricks + +import ( + "fmt" + "github.com/databrickslabs/databricks-terraform/client/model" + "github.com/databrickslabs/databricks-terraform/client/service" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" + "strings" +) + +func resourceGroupInstanceProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceGroupInstanceProfileCreate, + Read: resourceGroupInstanceProfileRead, + Delete: resourceGroupInstanceProfileDelete, + + Schema: map[string]*schema.Schema{ + "group_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "instance_profile_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: ValidateInstanceProfileARN, + }, + }, + } +} + +func resourceGroupInstanceProfileCreate(d *schema.ResourceData, m interface{}) error { + client := m.(*service.DBApiClient) + groupID := d.Get("group_id").(string) + instanceProfileID := d.Get("instance_profile_id").(string) + groupInstanceProfileID := &GroupInstanceProfileID{ + GroupID: groupID, + InstanceProfileID: instanceProfileID, + } + + roleAddList := []string{groupInstanceProfileID.InstanceProfileID} + err := client.Groups().Patch(groupInstanceProfileID.GroupID, roleAddList, nil, model.GroupRolesPath) + if err != nil { + return err + } + + d.SetId(groupInstanceProfileID.String()) + return resourceGroupInstanceProfileRead(d, m) +} + +func resourceGroupInstanceProfileRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + groupInstanceProfileID := parseGroupInstanceProfileID(id) + group, err := client.Groups().Read(groupInstanceProfileID.GroupID) + + // First verify if the group exists + if err != nil { + if isScimGroupMissing(err.Error(), groupInstanceProfileID.GroupID) { + log.Printf("Missing group with id: %s.", groupInstanceProfileID.GroupID) + d.SetId("") + return nil + } + return err + } + + // Set Id to null if instance profile is not in group + if !iInstanceProfileInGroup(groupInstanceProfileID.InstanceProfileID, &group) { + log.Printf("Missing role %s in group with id: %s.", groupInstanceProfileID.InstanceProfileID, groupInstanceProfileID.GroupID) + d.SetId("") + return nil + } + + err = d.Set("group_id", groupInstanceProfileID.GroupID) + if err != nil { + return err + } + + err = d.Set("instance_profile_id", groupInstanceProfileID.InstanceProfileID) + return err +} + +func resourceGroupInstanceProfileDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(*service.DBApiClient) + groupInstanceProfileID := parseGroupInstanceProfileID(id) + + roleRemoveList := []string{groupInstanceProfileID.InstanceProfileID} + // Patch op to remove role from group + err := client.Groups().Patch(groupInstanceProfileID.GroupID, nil, roleRemoveList, model.GroupRolesPath) + return err +} + +type GroupInstanceProfileID struct { + GroupID string + InstanceProfileID string +} + +func (g GroupInstanceProfileID) String() string { + return fmt.Sprintf("%s|%s", g.GroupID, g.InstanceProfileID) +} + +func parseGroupInstanceProfileID(id string) *GroupInstanceProfileID { + parts := strings.Split(id, "|") + return &GroupInstanceProfileID{ + GroupID: parts[0], + InstanceProfileID: parts[1], + } +} + +func 
iInstanceProfileInGroup(role string, group *model.Group) bool { + for _, groupRole := range group.Roles { + if groupRole.Value == role { + return true + } + } + return false +} diff --git a/databricks/resource_databricks_group_role_aws_test.go b/databricks/resource_databricks_group_instance_profile_aws_test.go similarity index 75% rename from databricks/resource_databricks_group_role_aws_test.go rename to databricks/resource_databricks_group_instance_profile_aws_test.go index d7c9b9d9d..8d6876a72 100644 --- a/databricks/resource_databricks_group_role_aws_test.go +++ b/databricks/resource_databricks_group_instance_profile_aws_test.go @@ -11,7 +11,7 @@ import ( "testing" ) -func TestAccAWSGroupRoleResource(t *testing.T) { +func TestAccAWSGroupInstanceProfileResource(t *testing.T) { var group model.Group // generate a random name for each tokenInfo test run, to avoid // collisions from multiple concurrent tests. @@ -23,27 +23,27 @@ func TestAccAWSGroupRoleResource(t *testing.T) { resource.Test(t, resource.TestCase{ Providers: testAccProviders, - CheckDestroy: testAWSGroupRoleResourceDestroy, + CheckDestroy: testAWSGroupInstanceProfileResourceDestroy, Steps: []resource.TestStep{ { // use a dynamic configuration with the random name from above - Config: testAWSGroupRoleResourceCreate(role, groupName), + Config: testAWSGroupInstanceProfileResourceCreate(role, groupName), // compose a basic test, checking both remote and local values Check: resource.ComposeTestCheckFunc( // query the API to retrieve the tokenInfo object - testAWSGroupRoleResourceExists("databricks_group.my_group", &group, t), + testAWSGroupInstanceProfileResourceExists("databricks_group.my_group", &group, t), // verify remote values - testAWSGroupRoleValues(t, &group, groupName, role), + testAWSGroupInstanceProfileValues(t, &group, groupName, role), // verify local values resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", groupName), - resource.TestCheckResourceAttr("databricks_group_role.my_group_role", "instance_profile_id", role), + resource.TestCheckResourceAttr("databricks_group_instance_profile.my_group_instance_profile", "instance_profile_id", role), ), Destroy: false, }, { // use a dynamic configuration with the random name from above - Config: testAWSGroupRoleResourceCreate(role, groupName), + Config: testAWSGroupInstanceProfileResourceCreate(role, groupName), // Test behavior to expect to attempt to create new role mapping because role is gone PreConfig: func() { @@ -57,7 +57,7 @@ func TestAccAWSGroupRoleResource(t *testing.T) { }, { // use a dynamic configuration with the random name from above - Config: testAWSGroupRoleResourceCreate(role, groupName), + Config: testAWSGroupInstanceProfileResourceCreate(role, groupName), // Test behavior to expect to attempt to create new role mapping because role is gone PreConfig: func() { @@ -73,7 +73,7 @@ func TestAccAWSGroupRoleResource(t *testing.T) { }) } -func testAWSGroupRoleResourceDestroy(s *terraform.State) error { +func testAWSGroupInstanceProfileResourceDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*service.DBApiClient) for _, rs := range s.RootModule().Resources { if rs.Type != "databricks_group" { @@ -88,16 +88,16 @@ func testAWSGroupRoleResourceDestroy(s *terraform.State) error { return nil } -func testAWSGroupRoleValues(t *testing.T, group *model.Group, displayName, role string) resource.TestCheckFunc { +func testAWSGroupInstanceProfileValues(t *testing.T, group *model.Group, displayName, role string) 
resource.TestCheckFunc { return func(s *terraform.State) error { assert.True(t, group.DisplayName == displayName) - assert.True(t, iRoleInGroup(role, group), "role is not in group") + assert.True(t, iInstanceProfileInGroup(role, group), "role is not in group") return nil } } // testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. -func testAWSGroupRoleResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { +func testAWSGroupInstanceProfileResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc { return func(s *terraform.State) error { // find the corresponding state object rs, ok := s.RootModule().Resources[n] @@ -118,7 +118,7 @@ func testAWSGroupRoleResourceExists(n string, group *model.Group, t *testing.T) } } -func testAWSGroupRoleResourceCreate(roleArn, groupName string) string { +func testAWSGroupInstanceProfileResourceCreate(roleArn, groupName string) string { return fmt.Sprintf(` resource "databricks_instance_profile" "instance_profile" { instance_profile_arn = "%s" @@ -127,7 +127,7 @@ func testAWSGroupRoleResourceCreate(roleArn, groupName string) string { resource "databricks_group" "my_group" { display_name = "%s" } - resource "databricks_group_role" "my_group_role" { + resource "databricks_group_instance_profile" "my_group_instance_profile" { group_id = databricks_group.my_group.id instance_profile_id = databricks_instance_profile.instance_profile.id } diff --git a/databricks/resource_databricks_group_role.go b/databricks/resource_databricks_group_role.go deleted file mode 100644 index 34931547f..000000000 --- a/databricks/resource_databricks_group_role.go +++ /dev/null @@ -1,120 +0,0 @@ -package databricks - -import ( - "fmt" - "github.com/databrickslabs/databricks-terraform/client/model" - "github.com/databrickslabs/databricks-terraform/client/service" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "log" - "strings" -) - -func resourceGroupRole() *schema.Resource { - return &schema.Resource{ - Create: resourceGroupRoleCreate, - Read: resourceGroupRoleRead, - Delete: resourceGroupRoleDelete, - - Schema: map[string]*schema.Schema{ - "group_id": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - "instance_profile_id": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - ValidateFunc: ValidateInstanceProfileARN, - }, - }, - } -} - -func resourceGroupRoleCreate(d *schema.ResourceData, m interface{}) error { - client := m.(*service.DBApiClient) - groupID := d.Get("group_id").(string) - instanceProfileID := d.Get("instance_profile_id").(string) - groupRoleID := &GroupRoleID{ - GroupID: groupID, - InstanceProfileID: instanceProfileID, - } - - roleAddList := []string{groupRoleID.InstanceProfileID} - err := client.Groups().Patch(groupRoleID.GroupID, roleAddList, nil, model.GroupRolesPath) - if err != nil { - return err - } - - d.SetId(groupRoleID.String()) - return resourceGroupRoleRead(d, m) -} - -func resourceGroupRoleRead(d *schema.ResourceData, m interface{}) error { - id := d.Id() - client := m.(*service.DBApiClient) - groupRoleID := parseGroupRoleID(id) - group, err := client.Groups().Read(groupRoleID.GroupID) - - // First verify if the group exists - if err != nil { - if isScimGroupMissing(err.Error(), groupRoleID.GroupID) { - log.Printf("Missing group with id: %s.", groupRoleID.GroupID) - d.SetId("") - return nil - } - return err - } - - // Set Id to null if instance profile is not in group - if !iRoleInGroup(groupRoleID.InstanceProfileID, &group) 
{ - log.Printf("Missing role %s in group with id: %s.", groupRoleID.InstanceProfileID, groupRoleID.GroupID) - d.SetId("") - return nil - } - - err = d.Set("group_id", groupRoleID.GroupID) - if err != nil { - return err - } - - err = d.Set("instance_profile_id", groupRoleID.InstanceProfileID) - return err -} - -func resourceGroupRoleDelete(d *schema.ResourceData, m interface{}) error { - id := d.Id() - client := m.(*service.DBApiClient) - groupRoleID := parseGroupRoleID(id) - - roleRemoveList := []string{groupRoleID.InstanceProfileID} - // Patch op to remove role from group - err := client.Groups().Patch(groupRoleID.GroupID, nil, roleRemoveList, model.GroupRolesPath) - return err -} - -type GroupRoleID struct { - GroupID string - InstanceProfileID string -} - -func (g GroupRoleID) String() string { - return fmt.Sprintf("%s|%s", g.GroupID, g.InstanceProfileID) -} - -func parseGroupRoleID(id string) *GroupRoleID { - parts := strings.Split(id, "|") - return &GroupRoleID{ - GroupID: parts[0], - InstanceProfileID: parts[1], - } -} - -func iRoleInGroup(role string, group *model.Group) bool { - for _, groupRole := range group.Roles { - if groupRole.Value == role { - return true - } - } - return false -} From 20af367466a66c8a4ed1acef663de9d6158ea104 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 11:19:29 -0400 Subject: [PATCH 11/13] fixed function name ValidateInstanceProfileARN in the doc comments --- databricks/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/databricks/utils.go b/databricks/utils.go index 79107d60c..1fc9d3a13 100644 --- a/databricks/utils.go +++ b/databricks/utils.go @@ -99,7 +99,7 @@ func unpackMWSAccountID(combined string) (PackagedMWSIds, error) { return packagedMWSIds, nil } -// ValidateAWSARN is a ValidateFunc that ensures the role id is a valid aws iam instance profile arn +// ValidateInstanceProfileARN is a ValidateFunc that ensures the role id is a valid aws iam instance profile arn func ValidateInstanceProfileARN(val interface{}, key string) (warns []string, errs []error) { v := val.(string) From d56c8ed9963ff2b91b0df174d78abea8f8a4c38a Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 11:20:32 -0400 Subject: [PATCH 12/13] formatting and fixed typo --- databricks/provider.go | 24 +++++++++---------- databricks/resource_databricks_group.go | 11 +++++---- .../resource_databricks_group_aws_test.go | 3 ++- .../resource_databricks_group_azure_test.go | 3 ++- ...ource_databricks_group_instance_profile.go | 5 ++-- ...abricks_group_instance_profile_aws_test.go | 3 ++- .../resource_databricks_group_member.go | 5 ++-- ...source_databricks_group_member_aws_test.go | 3 ++- ...urce_databricks_group_member_azure_test.go | 3 ++- databricks/utils.go | 5 ++-- 10 files changed, 37 insertions(+), 28 deletions(-) diff --git a/databricks/provider.go b/databricks/provider.go index 05f2ab497..ee93f40e3 100644 --- a/databricks/provider.go +++ b/databricks/provider.go @@ -34,19 +34,19 @@ func Provider(version string) terraform.ResourceProvider { "databricks_scim_user": resourceScimUser(), "databricks_scim_group": resourceScimGroup(), // Scim Group is split into multiple components for flexibility to pick and choose - "databricks_group": resourceGroup(), + "databricks_group": resourceGroup(), "databricks_group_instance_profile": resourceGroupInstanceProfile(), - "databricks_group_member": resourceGroupMember(), - "databricks_notebook": resourceNotebook(), - "databricks_cluster": resourceCluster(), - "databricks_job": 
resourceJob(), - "databricks_dbfs_file": resourceDBFSFile(), - "databricks_dbfs_file_sync": resourceDBFSFileSync(), - "databricks_instance_profile": resourceInstanceProfile(), - "databricks_aws_s3_mount": resourceAWSS3Mount(), - "databricks_azure_blob_mount": resourceAzureBlobMount(), - "databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(), - "databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(), + "databricks_group_member": resourceGroupMember(), + "databricks_notebook": resourceNotebook(), + "databricks_cluster": resourceCluster(), + "databricks_job": resourceJob(), + "databricks_dbfs_file": resourceDBFSFile(), + "databricks_dbfs_file_sync": resourceDBFSFileSync(), + "databricks_instance_profile": resourceInstanceProfile(), + "databricks_aws_s3_mount": resourceAWSS3Mount(), + "databricks_azure_blob_mount": resourceAzureBlobMount(), + "databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(), + "databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(), // MWS (multiple workspaces) resources are only limited to AWS as azure already has a built in concept of MWS "databricks_mws_credentials": resourceMWSCredentials(), "databricks_mws_storage_configurations": resourceMWSStorageConfigurations(), diff --git a/databricks/resource_databricks_group.go b/databricks/resource_databricks_group.go index ee51b67ab..8cb20355f 100644 --- a/databricks/resource_databricks_group.go +++ b/databricks/resource_databricks_group.go @@ -1,10 +1,11 @@ package databricks import ( + "log" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "log" ) func resourceGroup() *schema.Resource { @@ -21,12 +22,12 @@ func resourceGroup() *schema.Resource { Required: true, }, "allow_cluster_create": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, }, "allow_instance_pool_create": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, }, }, Importer: &schema.ResourceImporter{ diff --git a/databricks/resource_databricks_group_aws_test.go b/databricks/resource_databricks_group_aws_test.go index 675f3b3d1..fceffef7a 100644 --- a/databricks/resource_databricks_group_aws_test.go +++ b/databricks/resource_databricks_group_aws_test.go @@ -3,13 +3,14 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAWSGroupResource(t *testing.T) { diff --git a/databricks/resource_databricks_group_azure_test.go b/databricks/resource_databricks_group_azure_test.go index 984fb666d..f23f756f3 100644 --- a/databricks/resource_databricks_group_azure_test.go +++ b/databricks/resource_databricks_group_azure_test.go @@ -3,13 +3,14 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func 
TestAccAzureGroupResource(t *testing.T) { diff --git a/databricks/resource_databricks_group_instance_profile.go b/databricks/resource_databricks_group_instance_profile.go index a069ab77f..aed137294 100644 --- a/databricks/resource_databricks_group_instance_profile.go +++ b/databricks/resource_databricks_group_instance_profile.go @@ -2,11 +2,12 @@ package databricks import ( "fmt" + "log" + "strings" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "log" - "strings" ) func resourceGroupInstanceProfile() *schema.Resource { diff --git a/databricks/resource_databricks_group_instance_profile_aws_test.go b/databricks/resource_databricks_group_instance_profile_aws_test.go index 8d6876a72..30cf732e5 100644 --- a/databricks/resource_databricks_group_instance_profile_aws_test.go +++ b/databricks/resource_databricks_group_instance_profile_aws_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAWSGroupInstanceProfileResource(t *testing.T) { diff --git a/databricks/resource_databricks_group_member.go b/databricks/resource_databricks_group_member.go index 33fbfbdd7..3e59ded94 100644 --- a/databricks/resource_databricks_group_member.go +++ b/databricks/resource_databricks_group_member.go @@ -2,11 +2,12 @@ package databricks import ( "fmt" + "log" + "strings" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "log" - "strings" ) // Use this to manage individual members of a particular group diff --git a/databricks/resource_databricks_group_member_aws_test.go b/databricks/resource_databricks_group_member_aws_test.go index 8f1b9e699..7fdcb260c 100644 --- a/databricks/resource_databricks_group_member_aws_test.go +++ b/databricks/resource_databricks_group_member_aws_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAWSGroupMemberResource(t *testing.T) { diff --git a/databricks/resource_databricks_group_member_azure_test.go b/databricks/resource_databricks_group_member_azure_test.go index 8b8b355df..770280a30 100644 --- a/databricks/resource_databricks_group_member_azure_test.go +++ b/databricks/resource_databricks_group_member_azure_test.go @@ -3,12 +3,13 @@ package databricks import ( "errors" "fmt" + "testing" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/terraform" "github.com/stretchr/testify/assert" - "testing" ) func TestAccAzureGroupMemberResource(t *testing.T) { diff --git a/databricks/utils.go b/databricks/utils.go index 1fc9d3a13..e28916e19 100644 --- a/databricks/utils.go +++ 
b/databricks/utils.go @@ -2,10 +2,11 @@ package databricks import ( "fmt" - "github.com/aws/aws-sdk-go/aws/arn" "strings" "time" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/databrickslabs/databricks-terraform/client/model" "github.com/databrickslabs/databricks-terraform/client/service" ) @@ -110,7 +111,7 @@ func ValidateInstanceProfileARN(val interface{}, key string) (warns []string, er // Parse and verify instance profiles instanceProfileArn, err := arn.Parse(v) if err != nil { - return nil, []error{fmt.Errorf("%s is invalid got: %s recieved error: %w", key, v, err)} + return nil, []error{fmt.Errorf("%s is invalid got: %s received error: %w", key, v, err)} } // Verify instance profile resource type, Resource gets parsed as instance-profile/ if !strings.HasPrefix(instanceProfileArn.Resource, "instance-profile") { From 54d5011e41b976a9db202137c8e33d70fc488016 Mon Sep 17 00:00:00 2001 From: Sriharsha Tikkireddy Date: Mon, 15 Jun 2020 11:21:23 -0400 Subject: [PATCH 13/13] added vendoring for aws sdk --- .../github.com/aws/aws-sdk-go/aws/arn/arn.go | 93 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 2 +- .../aws/aws-sdk-go/aws/client/client.go | 1 + .../aws-sdk-go/aws/client/default_retryer.go | 6 +- .../aws/client/metadata/client_info.go | 1 + .../github.com/aws/aws-sdk-go/aws/config.go | 53 +- .../aws-sdk-go/aws/context_background_1_5.go | 40 +- .../aws-sdk-go/aws/corehandlers/handlers.go | 2 +- .../credentials/context_background_go1.5.go | 22 + .../credentials/context_background_go1.7.go | 20 + .../aws/credentials/context_go1.5.go | 39 + .../aws/credentials/context_go1.9.go | 13 + .../aws-sdk-go/aws/credentials/credentials.go | 134 +- .../ec2rolecreds/ec2_role_provider.go | 20 +- .../aws/credentials/endpointcreds/provider.go | 11 +- .../aws/credentials/processcreds/provider.go | 3 +- .../shared_credentials_provider.go | 5 +- .../aws/credentials/static_provider.go | 4 +- .../stscreds/assume_role_provider.go | 61 +- .../stscreds/web_identity_provider.go | 45 +- .../aws/aws-sdk-go/aws/csm/reporter.go | 3 +- .../aws/aws-sdk-go/aws/ec2metadata/api.go | 120 +- .../aws/aws-sdk-go/aws/ec2metadata/service.go | 118 +- .../aws/ec2metadata/token_provider.go | 92 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 30 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 4385 ++++++++-- .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 120 +- .../aws/endpoints/legacy_regions.go | 24 + .../aws/aws-sdk-go/aws/endpoints/v3model.go | 67 +- .../aws/aws-sdk-go/aws/request/handlers.go | 21 + .../aws/aws-sdk-go/aws/request/request.go | 36 +- .../aws/request/request_pagination.go | 6 +- .../aws/aws-sdk-go/aws/request/retryer.go | 41 +- .../aws/aws-sdk-go/aws/session/credentials.go | 14 +- .../aws/aws-sdk-go/aws/session/env_config.go | 78 +- .../aws/aws-sdk-go/aws/session/session.go | 166 +- .../aws-sdk-go/aws/session/shared_config.go | 86 +- .../aws-sdk-go/aws/signer/v4/header_rules.go | 5 +- .../aws/signer/v4/request_context_go1.5.go | 13 + .../aws/signer/v4/request_context_go1.7.go | 13 + .../aws/aws-sdk-go/aws/signer/v4/stream.go | 63 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 110 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 57 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../internal/context/background_go1.5.go | 40 + .../aws/aws-sdk-go/internal/ini/ini_parser.go | 11 +- .../aws/aws-sdk-go/internal/ini/skipper.go | 6 +- .../aws-sdk-go/internal/strings/strings.go | 11 + .../internal/sync/singleflight/LICENSE | 27 + .../sync/singleflight/singleflight.go | 120 + .../private/checksum/content_md5.go 
| 53 + .../private/protocol/eventstream/debug.go | 2 +- .../private/protocol/eventstream/decode.go | 33 +- .../private/protocol/eventstream/encode.go | 72 +- .../eventstream/eventstreamapi/error.go | 55 +- .../eventstreamapi/{api.go => reader.go} | 42 +- .../eventstream/eventstreamapi/shared.go | 23 + .../eventstream/eventstreamapi/signer.go | 123 + .../eventstreamapi/stream_writer.go | 129 + .../eventstream/eventstreamapi/writer.go | 109 + .../protocol/eventstream/header_value.go | 5 + .../private/protocol/eventstream/message.go | 2 +- .../protocol/json/jsonutil/unmarshal.go | 60 +- .../aws-sdk-go/private/protocol/payload.go | 2 +- .../aws-sdk-go/private/protocol/protocol.go | 49 + .../private/protocol/query/build.go | 2 +- .../private/protocol/query/unmarshal.go | 2 +- .../private/protocol/rest/unmarshal.go | 78 +- .../private/protocol/restxml/restxml.go | 4 +- .../aws-sdk-go/private/protocol/unmarshal.go | 6 + .../private/protocol/unmarshal_error.go | 65 + .../private/protocol/xml/xmlutil/build.go | 9 + .../private/protocol/xml/xmlutil/unmarshal.go | 8 + .../aws/aws-sdk-go/service/s3/api.go | 7147 +++++++++++++++-- .../aws/aws-sdk-go/service/s3/body_hash.go | 49 +- .../aws-sdk-go/service/s3/customizations.go | 16 +- .../aws/aws-sdk-go/service/s3/endpoint.go | 233 + .../aws-sdk-go/service/s3/endpoint_errors.go | 151 + .../aws/aws-sdk-go/service/s3/errors.go | 10 +- .../service/s3/host_style_bucket.go | 43 +- .../s3/internal/arn/accesspoint_arn.go | 45 + .../aws-sdk-go/service/s3/internal/arn/arn.go | 71 + .../aws/aws-sdk-go/service/s3/service.go | 10 +- .../aws-sdk-go/service/s3/statusok_error.go | 16 +- .../aws-sdk-go/service/s3/unmarshal_error.go | 40 +- .../aws/aws-sdk-go/service/sts/api.go | 611 +- .../aws/aws-sdk-go/service/sts/errors.go | 23 +- .../aws/aws-sdk-go/service/sts/service.go | 9 +- .../jmespath/go-jmespath/.travis.yml | 10 +- .../github.com/jmespath/go-jmespath/README.md | 82 +- vendor/github.com/jmespath/go-jmespath/api.go | 2 +- vendor/github.com/jmespath/go-jmespath/go.mod | 5 + vendor/github.com/jmespath/go-jmespath/go.sum | 11 + .../github.com/jmespath/go-jmespath/parser.go | 2 +- vendor/modules.txt | 10 +- 95 files changed, 14160 insertions(+), 1829 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go rename 
vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/{api.go => reader.go} (77%) create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go create mode 100644 vendor/github.com/jmespath/go-jmespath/go.mod create mode 100644 vendor/github.com/jmespath/go-jmespath/go.sum diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 000000000..1c4967429 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. 
+// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 285e54d67..a4eb6a7f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer value = value.FieldByNameFunc(func(name string) bool { if c == name { return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + } else if !caseSensitive && strings.EqualFold(name, c) { return true } return false diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index c022407f5..03334d692 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -12,6 +12,7 @@ import ( type Config struct { Config *aws.Config Handlers request.Handlers + PartitionID string Endpoint string SigningRegion string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 0fda42510..9f6af19dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -16,11 +16,11 @@ import ( type DefaultRetryer struct { // Num max Retries is the number of max retries that will be performed. // By default, this is zero. - NumMaxRetries int + NumMaxRetries int // MinRetryDelay is the minimum retry delay after which retry will be performed. // If not set, the value is 0ns. - MinRetryDelay time.Duration + MinRetryDelay time.Duration // MinThrottleRetryDelay is the minimum retry delay when throttled. // If not set, the value is 0ns. @@ -28,7 +28,7 @@ type DefaultRetryer struct { // MaxRetryDelay is the maximum retry delay before which retry must be performed. // If not set, the value is 0ns. - MaxRetryDelay time.Duration + MaxRetryDelay time.Duration // MaxThrottleDelay is the maximum retry delay when throttled. // If not set, the value is 0ns. 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 920e9fddf..0c48f72e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -5,6 +5,7 @@ type ClientInfo struct { ServiceName string ServiceID string APIVersion string + PartitionID string Endpoint string SigningName string SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index fd1e240f6..2c002e152 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -43,7 +43,7 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. + // to `nil` to use the default generated endpoint. // // Note: You must still provide a `Region` value when specifying an // endpoint for a client. @@ -161,6 +161,17 @@ type Config struct { // on GetObject API calls. S3DisableContentMD5Validation *bool + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -227,6 +238,7 @@ type Config struct { // EnableEndpointDiscovery will allow for endpoint discovery on operations that // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. // // Example: // sess := session.Must(session.NewSession(&aws.Config{ @@ -246,6 +258,12 @@ type Config struct { // Disabling this feature is useful when you want to use local endpoints // for testing that do not support the modeled host prefix pattern. DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder @@ -379,6 +397,13 @@ func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { } +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + // WithUseDualStack sets a config UseDualStack value returning a Config // pointer for chaining. 
func (c *Config) WithUseDualStack(enable bool) *Config { @@ -420,6 +445,20 @@ func (c *Config) MergeIn(cfgs ...*Config) { } } +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -493,6 +532,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation } + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -520,6 +563,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableEndpointHostPrefix != nil { dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go index 66c5945db..2f9446333 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -2,42 +2,8 @@ package aws -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) +import ( + "github.com/aws/aws-sdk-go/internal/context" ) // BackgroundContext returns a context that will never be canceled, has no @@ -52,5 +18,5 @@ var ( // // See https://golang.org/pkg/context for more information on Contexts. func BackgroundContext() Context { - return backgroundCtx + return context.BackgroundCtx } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 0c60e612e..aa902d708 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -161,7 +161,7 @@ func handleSendError(r *request.Request, err error) { } // Catch all request errors, and let the default retrier determine // if the error is retryable. 
- r.Error = awserr.New("RequestError", "send request failed", err) + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) // Override the error with a context canceled error, if that was canceled. ctx := r.Context() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 000000000..5852b2648 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 000000000..388b21541 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 000000000..8152a864a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. 
Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 000000000..4356edb3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 4af592158..9f8fd92a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -50,10 +50,11 @@ package credentials import ( "fmt" - "sync" + "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -106,6 +107,13 @@ type Provider interface { IsExpired() bool } +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + // An Expirer is an interface that Providers can implement to expose the expiration // time, if known. If the Provider cannot accurately provide this info, // it should not implement this interface. @@ -197,24 +205,24 @@ func (e *Expiry) ExpiresAt() time.Time { // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex + creds atomic.Value + sf singleflight.Group provider Provider } // NewCredentials returns a pointer to a new Credentials with the provider set. func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, + c := &Credentials{ + provider: provider, } + c.creds.Store(Value{}) + return c } -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. // // Will return the cached credentials Value if it has not expired. 
If the // credentials Value has expired the Provider's Retrieve() will be called @@ -222,31 +230,56 @@ func NewCredentials(provider Provider) *Credentials { // // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - // Check the cached credentials first with just the read lock. - c.m.RLock() - if !c.isExpired() { - creds := c.creds - c.m.RUnlock() - return creds, nil +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil } - c.m.RUnlock() - - // Credentials are expired need to retrieve the credentials taking the full - // lock. - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) } +} - return c.creds, nil +func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds.Store(creds) + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) } // Expire expires the credentials and forces them to be retrieved on the @@ -255,10 +288,7 @@ func (c *Credentials) Get() (Value, error) { // This will override the Provider's expired state, and force Credentials // to call the Provider's Retrieve(). func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true + c.creds.Store(Value{}) } // IsExpired returns if the credentials are no longer valid, and need @@ -267,33 +297,43 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpired() + return c.isExpired(c.creds.Load()) } // isExpired helper method wrapping the definition of expired credentials. 
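
Editor's note: a minimal consumption sketch (not part of the patch) for the context-aware GetWithContext added above. The static key/secret values and the timeout are placeholders; an empty session token is passed per the static provider documentation later in this patch.

    // Illustrative sketch only: retrieve credentials with a cancellable
    // context using the GetWithContext method introduced above.
    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        // GetWithContext returns the cached Value or refreshes it via the
        // provider, returning early if ctx is canceled first.
        v, err := creds.GetWithContext(ctx)
        if err != nil {
            fmt.Println("credential retrieval failed:", err)
            return
        }
        fmt.Println("provider:", v.ProviderName)
    }
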
-func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() +func (c *Credentials) isExpired(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() } // ExpiresAt provides access to the functionality of the Expirer interface of // the underlying Provider, if it supports that interface. Otherwise, it returns // an error. func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - expirer, ok := c.provider.(Expirer) if !ok { return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), nil) } - if c.forceRefresh { + if c.creds.Load().(Value) == (Value{}) { // set expiration time to the distant past return time.Time{}, nil } return expirer.ExpiresAt(), nil } + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 43d4ed386..92af5b725 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -87,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(* // Error will be returned if the request fails, or unable to extract // the desired credentials. func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -97,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { } credsName := credsList[0] - roleCreds, err := requestCred(m.Client, credsName) + roleCreds, err := requestCred(ctx, m.Client, credsName) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -130,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. 
// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) if err != nil { return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) } @@ -154,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // // If the credentials cannot be found, or there is an error reading the response // and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 1a7af53a4..785f30d8e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -116,7 +116,13 @@ func (p *Provider) IsExpired() bool { // Retrieve will attempt to request the credentials from the endpoint the Provider // was configured for. And error will be returned if the retrieval fails. func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
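
Editor's note: a hypothetical sketch (not part of the patch) of a custom provider that satisfies both credentials.Provider and the new credentials.ProviderWithContext interface, mirroring the pattern the providers in this patch follow. The staticEnvProvider type and its behaviour are invented for illustration.

    // Illustrative sketch only: a made-up provider implementing the new
    // ProviderWithContext interface defined earlier in this patch.
    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    type staticEnvProvider struct{ retrieved bool }

    func (p *staticEnvProvider) Retrieve() (credentials.Value, error) {
        return p.RetrieveWithContext(aws.BackgroundContext())
    }

    func (p *staticEnvProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
        // A real provider would honor ctx cancellation while fetching.
        p.retrieved = true
        return credentials.Value{
            AccessKeyID:     os.Getenv("AWS_ACCESS_KEY_ID"),
            SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
            ProviderName:    "staticEnvProvider",
        }, nil
    }

    func (p *staticEnvProvider) IsExpired() bool { return !p.retrieved }

    func main() {
        creds := credentials.NewCredentials(&staticEnvProvider{})
        _, _ = creds.Get() // Get delegates to GetWithContext with a background context
    }
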
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) if err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New("CredentialsEndpointError", "failed to load credentials", err) @@ -148,7 +154,7 @@ type errorOutput struct { Message string `json:"message"` } -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { op := &request.Operation{ Name: "GetCredentials", HTTPMethod: "GET", @@ -156,6 +162,7 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") if authToken := p.AuthorizationToken; len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index 1980c8c14..e62483600 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -90,6 +90,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -142,7 +143,7 @@ const ( // DefaultBufSize limits buffer size from growing to an enormous // amount due to a faulty process. - DefaultBufSize = 1024 + DefaultBufSize = int(8 * sdkio.KibiByte) // DefaultTimeout default limit on time a process can run. DefaultTimeout = time.Duration(1) * time.Minute diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index e15514958..22b5c5d9f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -17,8 +17,9 @@ var ( ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 531139e39..cbba1e3d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -19,7 +19,9 @@ type StaticProvider struct { } // NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. +// wrapping a static credentials value provider. Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. 
func NewStaticCredentials(id, secret, token string) *Credentials { return NewCredentials(&StaticProvider{Value: Value{ AccessKeyID: id, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 2e528d130..6846ef6f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -87,6 +87,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) @@ -118,6 +119,10 @@ type AssumeRoler interface { AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) } +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + // DefaultDuration is the default amount of time in minutes that the credentials // will be valid for. var DefaultDuration = time.Duration(15) * time.Minute @@ -144,6 +149,13 @@ type AssumeRoleProvider struct { // Session name, if you wish to reuse the credentials elsewhere. RoleSessionName string + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. Duration time.Duration @@ -157,6 +169,29 @@ type AssumeRoleProvider struct { // size. Policy *string + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + // The identification number of the MFA device that is associated with the user // who is making the AssumeRole call. 
Specify this value if the trust policy // of the role being assumed includes a condition that requires MFA authentication. @@ -258,6 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(* // Retrieve generates a new set of temporary credentials using STS. func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { // Apply defaults where parameters are not set. if p.RoleSessionName == "" { // Try to work out a role name that will hopefully end up unique. @@ -269,10 +309,13 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, } if p.Policy != nil { input.Policy = p.Policy @@ -295,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } } - roleOutput, err := p.Client.AssumeRole(input) + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index b20b63394..6feb262b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -28,15 +28,34 @@ const ( // compare test values. var now = time.Now +// TokenFetcher shuold return WebIdentity token bytes or an error +type TokenFetcher interface { + FetchToken(credentials.Context) ([]byte, error) +} + +// FetchTokenPath is a path to a WebIdentity token file +type FetchTokenPath string + +// FetchToken returns a token by reading from the filesystem +func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { + data, err := ioutil.ReadFile(string(f)) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", f) + return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + return data, nil +} + // WebIdentityRoleProvider is used to retrieve credentials using // an OIDC token. 
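
Editor's note: an illustrative sketch (not part of the patch) that populates the new AssumeRoleProvider fields added above (Tags, TransitiveTagKeys, PolicyArns). The role ARN, tag values, and policy ARN are placeholders, and the STS client construction is assumed from the wider SDK rather than this diff.

    // Illustrative sketch only: configure the assume-role provider with
    // session tags and managed session policies.
    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession())

        p := &stscreds.AssumeRoleProvider{
            Client:          sts.New(sess),
            RoleARN:         "arn:aws:iam::123456789012:role/example",
            RoleSessionName: "example-session",
            Tags: []*sts.Tag{
                {Key: aws.String("team"), Value: aws.String("data")},
            },
            TransitiveTagKeys: []*string{aws.String("team")},
            PolicyArns: []*sts.PolicyDescriptorType{
                {Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
            },
        }

        creds := credentials.NewCredentials(p)
        _ = creds // pass via aws.Config{Credentials: creds} when building clients
    }
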
type WebIdentityRoleProvider struct { credentials.Expiry + PolicyArns []*sts.PolicyDescriptorType client stsiface.STSAPI ExpiryWindow time.Duration - tokenFilePath string + tokenFetcher TokenFetcher roleARN string roleSessionName string } @@ -52,9 +71,15 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName // NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { return &WebIdentityRoleProvider{ client: svc, - tokenFilePath: path, + tokenFetcher: tokenFetcher, roleARN: roleARN, roleSessionName: roleSessionName, } @@ -64,10 +89,16 @@ func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, p // 'WebIdentityTokenFilePath' specified destination and if that is empty an // error will be returned. func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - b, err := ioutil.ReadFile(p.tokenFilePath) + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) } sessionName := p.roleSessionName @@ -77,10 +108,14 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { sessionName = strconv.FormatInt(now().UnixNano(), 10) } req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, RoleArn: &p.roleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), }) + + req.SetContext(ctx) + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. 
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index c7008d8c3..835bcd49c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -66,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { XAmzRequestID: aws.String(r.RequestID), - AttemptCount: aws.Int(r.RetryCount + 1), AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), AccessKey: aws.String(creds.AccessKeyID), } @@ -90,7 +89,7 @@ func getMetricException(err awserr.Error) metricException { code := err.Code() switch code { - case "RequestError", + case request.ErrCodeRequestError, request.ErrCodeSerialization, request.CanceledErrorCode: return sdkException{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index d126764ce..a716c021c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,28 +4,73 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + // GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or +// instance metadata service. The content will be returned as a string, or // error if the request failed. func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. 
+func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", HTTPPath: sdkuri.PathJoin("/meta-data", p), } - output := &metadataOutput{} + req := c.NewRequest(op, nil, output) - err := req.Send() + req.SetContext(ctx) + + err := req.Send() return output.Content, err } @@ -33,6 +78,13 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { // there is no user-data setup for the EC2 instance a "NotFoundError" error // code will be returned. func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", @@ -41,13 +93,9 @@ func (c *EC2Metadata) GetUserData() (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) - err := req.Send() + req.SetContext(ctx) + err := req.Send() return output.Content, err } @@ -55,6 +103,13 @@ func (c *EC2Metadata) GetUserData() (string, error) { // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", @@ -63,8 +118,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - err := req.Send() + req.SetContext(ctx) + err := req.Send() return output.Content, err } @@ -72,7 +128,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { // instance. Error is returned if the request fails or is unable to parse // the response. func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
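
Editor's note: a minimal usage sketch (not part of the patch) for the context-aware EC2 metadata helpers added above. ec2metadata.New is the standard client constructor and is assumed here; the timeout is arbitrary.

    // Illustrative sketch only: query instance metadata with a context.
    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := ec2metadata.New(sess)

        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        // RegionWithContext, per this patch, derives the region from the
        // instance identity document instead of the availability-zone path.
        region, err := svc.RegionWithContext(ctx)
        if err != nil {
            fmt.Println("not on EC2 or metadata service unavailable:", err)
            return
        }
        fmt.Println("region:", region)
    }
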
+func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") if err != nil { return EC2InstanceIdentityDocument{}, awserr.New("EC2MetadataRequestError", @@ -91,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument // IAMInfo retrieves IAM info from the metadata API func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") if err != nil { return EC2IAMInfo{}, awserr.New("EC2MetadataRequestError", @@ -116,24 +184,36 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) if err != nil { return "", err } - - if len(resp) == 0 { - return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil + // returns region + return region, nil } // Available returns if the application has access to the EC2 Metadata service. // Can be used to determine if application is running within an EC2 Instance and // the metadata service is available. func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { return false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 4c5636e35..b8b2940d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -13,6 +13,7 @@ import ( "io" "net/http" "os" + "strconv" "strings" "time" @@ -24,9 +25,25 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" -const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" +const ( + // ServiceName is the name of the service. 
+ ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) // A EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -63,8 +80,10 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio // use a shorter timeout than default because the metadata // service is local if it is running, and to fail faster // if not running on an ec2 instance. - Timeout: 5 * time.Second, + Timeout: 1 * time.Second, } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) } svc := &EC2Metadata{ @@ -80,13 +99,27 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ), } - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) svc.Handlers.UnmarshalError.PushBack(unmarshalError) svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) // Disable the EC2 Metadata service if the environment variable is set. - // This shortcirctes the service's functionality to always fail to send + // This short-circuits the service's functionality to always fail to send // requests. 
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { svc.Handlers.Send.SwapNamed(request.NamedHandler{ @@ -107,7 +140,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio for _, option := range opts { option(svc.Client) } - return svc } @@ -119,30 +151,74 @@ type metadataOutput struct { Content string } -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) - return - } +type tokenOutput struct { + Token string + TTL time.Duration +} - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, } func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) return } // Response body format is not consistent between metadata endpoints. 
// Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) } func validateEndpointHandler(r *request.Request) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 000000000..d0a3a020d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,92 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 87b9ff3ff..654fb1ad5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol p := &ps[i] custAddEC2Metadata(p) custAddS3DualStack(p) + custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) custFixAppAutoscalingUsGov(p) @@ -92,7 +93,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol } func custAddS3DualStack(p *partition) { - if p.ID != "aws" { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { return } @@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) { custAddDualstack(p, "s3-control") } +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + func custAddDualstack(p *partition, svcName string) { s, ok := p.Services[svcName] if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 452cefda6..64089826d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -17,6 +17,7 @@ const ( // AWS Standard partition's regions. const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). @@ -24,11 +25,12 @@ const ( ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). 
UsEast1RegionID = "us-east-1" // US East (N. Virginia). @@ -46,7 +48,7 @@ const ( // AWS GovCloud (US) partition's regions. const ( UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) // AWS ISO (US) partition's regions. @@ -97,7 +99,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") return reg }(), }, @@ -107,6 +109,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, "ap-east-1": region{ Description: "Asia Pacific (Hong Kong)", }, @@ -129,19 +134,22 @@ var awsPartition = partition{ Description: "Canada (Central)", }, "eu-central-1": region{ - Description: "EU (Frankfurt)", + Description: "Europe (Frankfurt)", }, "eu-north-1": region{ - Description: "EU (Stockholm)", + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", }, "eu-west-1": region{ - Description: "EU (Ireland)", + Description: "Europe (Ireland)", }, "eu-west-2": region{ - Description: "EU (London)", + Description: "Europe (London)", }, "eu-west-3": region{ - Description: "EU (Paris)", + Description: "Europe (Paris)", }, "me-south-1": region{ Description: "Middle East (Bahrain)", @@ -169,9 +177,10 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, - "acm": service{ + "access-analyzer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -181,6 +190,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -192,6 +202,61 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "acm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "acm-pca": service{ Defaults: endpoint{ Protocols: 
[]string{"https"}, @@ -209,6 +274,61 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -218,6 +338,12 @@ var awsPartition = partition{ "api.ecr": service{ Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, "ap-east-1": endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -272,6 +398,12 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, "eu-west-1": endpoint{ Hostname: "api.ecr.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -290,6 +422,30 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{ Hostname: "api.ecr.me-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -328,6 +484,29 @@ var awsPartition = partition{ }, }, }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", 
+ }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ @@ -366,6 +545,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -400,6 +580,7 @@ var awsPartition = partition{ "apigateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -409,6 +590,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -422,13 +604,10 @@ var awsPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -438,6 +617,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -452,6 +632,7 @@ var awsPartition = partition{ "appmesh": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -459,8 +640,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -481,8 +666,14 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "appsync": service{ @@ -515,8 +706,12 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -525,6 +720,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -534,6 +730,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -547,13 +744,10 @@ var awsPartition = partition{ }, "autoscaling-plans": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: 
credentialScope{
-					Service: "autoscaling-plans",
-				},
 			},
 			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
 				"ap-south-1": endpoint{},
@@ -561,8 +755,12 @@ var awsPartition = partition{
 				"ap-southeast-2": endpoint{},
 				"ca-central-1": endpoint{},
 				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
 				"eu-west-1": endpoint{},
 				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
 				"us-east-1": endpoint{},
 				"us-east-2": endpoint{},
 				"us-west-1": endpoint{},
@@ -572,14 +770,20 @@ var awsPartition = partition{
 		"backup": service{
 			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
 				"ap-southeast-1": endpoint{},
 				"ap-southeast-2": endpoint{},
 				"ca-central-1": endpoint{},
 				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
 				"eu-west-1": endpoint{},
 				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
 				"us-east-1": endpoint{},
 				"us-east-2": endpoint{},
 				"us-west-1": endpoint{},
@@ -601,12 +805,36 @@ var awsPartition = partition{
 				"eu-west-1": endpoint{},
 				"eu-west-2": endpoint{},
 				"eu-west-3": endpoint{},
-				"me-south-1": endpoint{},
-				"sa-east-1": endpoint{},
-				"us-east-1": endpoint{},
-				"us-east-2": endpoint{},
-				"us-west-1": endpoint{},
-				"us-west-2": endpoint{},
+				"fips-us-east-1": endpoint{
+					Hostname: "fips.batch.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "fips.batch.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "fips.batch.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "fips.batch.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
 			},
 		},
 		"budgets": service{
@@ -655,12 +883,23 @@ var awsPartition = partition{
 		"cloud9": service{
 			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
+				"ap-northeast-2": endpoint{},
+				"ap-south-1": endpoint{},
 				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"ca-central-1": endpoint{},
 				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
 				"eu-west-1": endpoint{},
+				"eu-west-2": endpoint{},
+				"eu-west-3": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
 				"us-east-1": endpoint{},
 				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
 				"us-west-2": endpoint{},
 			},
 		},
@@ -681,6 +920,7 @@ var awsPartition = partition{
 		"cloudformation": service{
 			Endpoints: endpoints{
+				"af-south-1": endpoint{},
 				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
@@ -690,15 +930,40 @@ var awsPartition = partition{
 				"ca-central-1": endpoint{},
 				"eu-central-1": endpoint{},
 				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
 				"eu-west-1": endpoint{},
 				"eu-west-2": endpoint{},
 				"eu-west-3": endpoint{},
 				"me-south-1": endpoint{},
 				"sa-east-1": endpoint{},
 				"us-east-1": endpoint{},
-				"us-east-2": endpoint{},
-				"us-west-1": endpoint{},
-				"us-west-2": endpoint{},
+				"us-east-1-fips": endpoint{
+					Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"us-east-2": endpoint{},
+				"us-east-2-fips": endpoint{
+					Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"us-west-1": endpoint{},
+				"us-west-1-fips": endpoint{
+					Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"us-west-2": endpoint{},
+				"us-west-2-fips": endpoint{
+					Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
 			},
 		},
 		"cloudfront": service{
@@ -750,6 +1015,7 @@ var awsPartition = partition{
 				"eu-west-2": endpoint{},
 				"eu-west-3": endpoint{},
 				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
 				"us-east-1": endpoint{},
 				"us-east-2": endpoint{},
 				"us-west-1": endpoint{},
@@ -774,6 +1040,7 @@ var awsPartition = partition{
 		"cloudtrail": service{
 			Endpoints: endpoints{
+				"af-south-1": endpoint{},
 				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
@@ -783,20 +1050,60 @@ var awsPartition = partition{
 				"ca-central-1": endpoint{},
 				"eu-central-1": endpoint{},
 				"eu-north-1": endpoint{},
+				"eu-south-1": endpoint{},
 				"eu-west-1": endpoint{},
 				"eu-west-2": endpoint{},
 				"eu-west-3": endpoint{},
-				"me-south-1": endpoint{},
-				"sa-east-1": endpoint{},
-				"us-east-1": endpoint{},
-				"us-east-2": endpoint{},
-				"us-west-1": endpoint{},
-				"us-west-2": endpoint{},
-			},
-		},
-		"codebuild": service{
-
-			Endpoints: endpoints{
+				"fips-us-east-1": endpoint{
+					Hostname: "cloudtrail-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "cloudtrail-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"codeartifact": service{
+
+			Endpoints: endpoints{
+				"ap-northeast-1": endpoint{},
+				"ap-south-1": endpoint{},
+				"ap-southeast-1": endpoint{},
+				"ap-southeast-2": endpoint{},
+				"eu-central-1": endpoint{},
+				"eu-north-1": endpoint{},
+				"eu-west-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-2": endpoint{},
+			},
+		},
+		"codebuild": service{
+
+			Endpoints: endpoints{
 				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
@@ -844,6 +1151,7 @@ var awsPartition = partition{
 		"codecommit": service{
 			Endpoints: endpoints{
+				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
 				"ap-south-1": endpoint{},
@@ -861,16 +1169,18 @@ var awsPartition = partition{
 						Region: "ca-central-1",
 					},
 				},
-				"sa-east-1": endpoint{},
-				"us-east-1": endpoint{},
-				"us-east-2": endpoint{},
-				"us-west-1": endpoint{},
-				"us-west-2": endpoint{},
+				"me-south-1": endpoint{},
+				"sa-east-1": endpoint{},
+				"us-east-1": endpoint{},
+				"us-east-2": endpoint{},
+				"us-west-1": endpoint{},
+				"us-west-2": endpoint{},
 			},
 		},
 		"codedeploy": service{
 			Endpoints: endpoints{
+				"af-south-1": endpoint{},
 				"ap-east-1": endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
@@ -880,6 +1190,7 @@ var
awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -929,24 +1240,76 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codestar": service{ + "codestar-connections": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -965,9 +1328,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-idp": service{ @@ -982,9 +1363,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + 
"fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-sync": service{ @@ -1008,15 +1407,36 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "comprehendmedical": service{ @@ -1026,9 +1446,27 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "config": service{ @@ -1058,8 +1496,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1079,7 +1519,24 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1096,12 +1553,24 @@ var awsPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", 
+ }, + }, "fips-us-east-1": endpoint{ Hostname: "datasync-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1127,6 +1596,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1142,6 +1612,8 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1158,6 +1630,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1167,26 +1640,56 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "discovery": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1194,17 +1697,24 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "docdb": service{ @@ -1222,12 +1732,30 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": 
endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, "ap-southeast-2": endpoint{ Hostname: "rds.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1246,6 +1774,12 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1269,6 +1803,7 @@ var awsPartition = partition{ "ds": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1279,11 +1814,43 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dynamodb": service{ @@ -1291,6 +1858,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1306,6 +1874,7 @@ var awsPartition = partition{ }, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1353,6 +1922,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1362,15 +1932,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: 
"ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ec2metadata": service{ @@ -1387,6 +1988,7 @@ var awsPartition = partition{ "ecs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1396,19 +1998,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticache": service{ - + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, @@ -1422,23 +2051,35 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ + "elasticache": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1448,20 +2089,29 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - 
"me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticfilesystem": service{ + "elasticbeanstalk": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1469,20 +2119,47 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "elasticfilesystem": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1492,37 +2169,241 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: 
"elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ @@ -1570,6 +2451,7 @@ var awsPartition = partition{ "es": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1579,6 +2461,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1599,6 +2482,7 @@ var awsPartition = partition{ "events": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1608,15 +2492,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "firehose": service{ @@ -1634,11 +2543,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "fms": service{ @@ -1648,30 +2582,159 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fsx": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1699,6 +2762,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1708,15 +2772,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "glue": service{ @@ -1734,12 +2829,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1764,8 +2883,12 @@ var awsPartition = partition{ "groundstation": service{ Endpoints: endpoints{ - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "guardduty": service{ @@ -1835,6 +2958,12 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "importexport": service{ @@ -1863,10 +2992,34 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "iot": service{ @@ -1911,9 +3064,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1928,6 +3084,18 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, "ap-southeast-2": endpoint{ Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ @@ -1946,6 +3114,12 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, "us-east-1": endpoint{ Hostname: "data.iotevents.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1966,41 +3140,71 @@ var awsPartition = partition{ }, }, }, - "iotthingsgraph": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, + "iotsecuredtunneling": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, "kafka": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "kinesis": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2010,30 +3214,59 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "kinesisanalytics": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -2042,17 +3275,27 @@ var awsPartition = partition{ "kinesisvideo": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, "kms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2062,6 +3305,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2077,15 +3321,27 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "lambda": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2095,15 +3351,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "license-manager": service{ @@ -2121,12 +3402,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "lightsail": service{ @@ -2150,6 +3455,7 @@ var awsPartition = partition{ "logs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2159,6 +3465,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2177,6 +3484,35 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "marketplacecommerceanalytics": service{ Endpoints: endpoints{ @@ -2186,6 +3522,7 @@ var awsPartition = partition{ "mediaconnect": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2213,14 +3550,45 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, 
+ "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "medialive": service{ @@ -2234,8 +3602,11 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2248,6 +3619,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2266,6 +3638,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -2277,6 +3650,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2286,6 +3660,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2300,7 +3675,11 @@ var awsPartition = partition{ "mgh": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mobileanalytics": service{ @@ -2316,39 +3695,22 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "mq": service{ - + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2356,13 +3718,88 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + 
}, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mturk-requester": service{ @@ -2408,6 +3845,12 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2432,6 +3875,18 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2452,6 +3907,65 @@ var awsPartition = partition{ }, }, }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -2497,29 +4011,20 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, }, - "polly": service{ + "outposts": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -2528,56 +4033,81 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, }, - }, - "ram": service{ - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + 
"fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "rds": service{ + "polly": service{ Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -2592,17 +4122,124 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "redshift": service{ + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -2625,6 +4262,118 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "rds": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "rekognition": service{ Endpoints: endpoints{ @@ -2645,6 +4394,7 @@ var awsPartition = partition{ "resource-groups": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2654,6 +4404,7 @@ var awsPartition = partition{ "ca-central-1": 
endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2725,6 +4476,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2732,9 +4484,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2748,9 +4503,13 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "runtime.sagemaker": service{ @@ -2768,6 +4527,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -2800,7 +4560,7 @@ var awsPartition = partition{ }, }, "s3": service{ - PartitionEndpoint: "us-east-1", + PartitionEndpoint: "aws-global", IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -2810,7 +4570,8 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2825,9 +4586,17 @@ var awsPartition = partition{ Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -2847,7 +4616,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", + Hostname: "s3.us-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-2": endpoint{}, @@ -3012,6 +4781,41 @@ var awsPartition = partition{ }, }, }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -3033,6 +4837,8 @@ var awsPartition = partition{ "secretsmanager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3041,9 +4847,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -3078,6 +4886,7 @@ var awsPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3087,14 +4896,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "serverlessrepo": service{ @@ -3102,6 +4937,9 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, "ap-northeast-1": endpoint{ Protocols: []string{"https"}, }, @@ -3135,6 +4973,9 @@ var awsPartition = partition{ "eu-west-3": endpoint{ Protocols: []string{"https"}, }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, "sa-east-1": endpoint{ Protocols: []string{"https"}, }, @@ -3155,6 +4996,7 @@ var awsPartition = partition{ "servicecatalog": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3166,6 +5008,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -3224,6 +5067,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3231,18 +5078,31 @@ var awsPartition = partition{ }, }, "shield": service{ - IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Defaults: endpoint{ 
SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "sms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3252,15 +5112,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ @@ -3276,11 +5161,101 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: 
"snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "sns": service{ @@ -3288,6 +5263,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3297,15 +5273,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "sqs": service{ @@ -3314,6 +5315,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3323,6 +5325,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3363,6 +5366,7 @@ var awsPartition = partition{ "ssm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3372,20 +5376,70 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: 
"ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "ssm-facade-fips-us-east-1": endpoint{ + Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ssm-facade-fips-us-east-2": endpoint{ + Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "ssm-facade-fips-us-west-1": endpoint{ + Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "ssm-facade-fips-us-west-2": endpoint{ + Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "states": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3395,29 +5449,56 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3495,44 +5576,31 @@ var awsPartition = partition{ }, "sts": service{ 
PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "sts.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{ - Hostname: "sts.me-south-1.amazonaws.com", + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", CredentialScope: credentialScope{ - Region: "me-south-1", + Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3577,6 +5645,7 @@ var awsPartition = partition{ "swf": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3586,20 +5655,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "tagging": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3609,6 +5704,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3620,6 +5716,65 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": 
endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "transfer": service{ Endpoints: endpoints{ @@ -3646,13 +5801,18 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "translate-fips.us-east-1.amazonaws.com", @@ -3667,49 +5827,256 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, "us-west-2-fips": endpoint{ Hostname: "translate-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + 
"ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", }, }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, }, }, }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, "workdocs": service{ Endpoints: endpoints{ @@ -3717,8 +6084,20 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "workmail": service{ @@ -3750,6 +6129,7 @@ var awsPartition = partition{ "xray": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3759,6 +6139,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3802,6 +6183,13 @@ var awscnPartition = partition{ }, }, Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "api.ecr": service{ Endpoints: endpoints{ @@ -3819,6 +6207,13 @@ var awscnPartition = partition{ }, }, }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ @@ -3828,17 +6223,26 @@ var awscnPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: 
"autoscaling.{region}.amazonaws.com.cn", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -3848,6 +6252,29 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -3883,6 +6310,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "codedeploy": service{ Endpoints: endpoints{ @@ -3903,6 +6337,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -3960,6 +6400,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -3974,6 +6423,25 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "elasticloadbalancing": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -4028,6 +6496,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "greengrass": service{ IsRegionalized: boxedTrue, Defaults: endpoint{ @@ -4037,6 +6512,13 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -4061,6 +6543,20 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -4068,6 +6564,13 @@ var awscnPartition = partition{ "cn-northwest-1": 
endpoint{}, }, }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -4116,6 +6619,17 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -4136,10 +6650,33 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -4150,6 +6687,9 @@ var awscnPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "cn-north-1": endpoint{ @@ -4168,6 +6708,26 @@ var awscnPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -4179,6 +6739,12 @@ var awscnPartition = partition{ Endpoints: endpoints{ "cn-north-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, }, }, "sns": service{ @@ -4217,7 +6783,8 @@ var awscnPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "streams.dynamodb": service{ @@ -4232,33 +6799,65 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "sts": service{ + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "support.cn-north-1.amazonaws.com", + "cn-north-1": endpoint{ + Hostname: 
"cn.transcribe.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, - "swf": service{ + "workspaces": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "tagging": service{ + "xray": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -4293,10 +6892,17 @@ var awsusgovPartition = partition{ Description: "AWS GovCloud (US-East)", }, "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", + Description: "AWS GovCloud (US-West)", }, }, Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -4309,6 +6915,18 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4316,6 +6934,18 @@ var awsusgovPartition = partition{ "api.ecr": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{ Hostname: "api.ecr.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -4334,6 +6964,18 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "apigateway": service{ @@ -4345,7 +6987,8 @@ var awsusgovPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "application-autoscaling", }, @@ -4355,9 +6998,38 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "athena": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, 
"us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4365,12 +7037,42 @@ var awsusgovPartition = partition{ "autoscaling": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "clouddirectory": service{ Endpoints: endpoints{ @@ -4380,8 +7082,18 @@ var awsusgovPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "cloudhsm": service{ @@ -4404,20 +7116,48 @@ var awsusgovPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "codebuild": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "codecommit": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4441,10 +7181,52 @@ var awsusgovPartition = partition{ }, }, }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "comprehend": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: 
"comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, @@ -4459,25 +7241,48 @@ var awsusgovPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "fips-us-gov-west-1": endpoint{ Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4485,6 +7290,18 @@ var awsusgovPartition = partition{ "ds": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4511,23 +7328,54 @@ var awsusgovPartition = partition{ "ec2": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, }, - }, - "ecs": service{ - Endpoints: endpoints{ "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, @@ -4549,19 +7397,54 @@ var awsusgovPartition = partition{ "elasticbeanstalk": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + 
Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "elasticfilesystem": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "elasticloadbalancing": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, @@ -4571,12 +7454,36 @@ var awsusgovPartition = partition{ "elasticmapreduce": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"https"}, }, }, }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "es": service{ Endpoints: endpoints{ @@ -4593,13 +7500,35 @@ var awsusgovPartition = partition{ "events": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "firehose": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4607,15 +7536,36 @@ var awsusgovPartition = partition{ "glacier": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "glue": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: 
"glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4626,7 +7576,12 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "guardduty": service{ @@ -4636,6 +7591,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "health": service{ @@ -4655,11 +7616,29 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "inspector": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4674,9 +7653,34 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "kinesis-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "kinesis-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4697,6 +7701,18 @@ var awsusgovPartition = partition{ "lambda": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4704,6 +7720,18 @@ var awsusgovPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4718,7 +7746,12 @@ var awsusgovPartition = partition{ "mediaconvert": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: 
"mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "metering.marketplace": service{ @@ -4735,6 +7768,18 @@ var awsusgovPartition = partition{ "monitoring": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4767,11 +7812,50 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, }, }, "polly": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -4785,6 +7869,18 @@ var awsusgovPartition = partition{ "rds": service{ Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4792,16 +7888,45 @@ var awsusgovPartition = partition{ "redshift": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -4815,6 +7940,13 @@ var awsusgovPartition = partition{ }, }, }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ 
-4824,6 +7956,9 @@ var awsusgovPartition = partition{ "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "fips-us-gov-west-1": endpoint{ @@ -4846,6 +7981,9 @@ var awsusgovPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "us-gov-east-1": endpoint{ @@ -4881,6 +8019,13 @@ var awsusgovPartition = partition{ "secretsmanager": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", @@ -4890,22 +8035,56 @@ var awsusgovPartition = partition{ }, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, }, "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "servicecatalog": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", @@ -4918,6 +8097,18 @@ var awsusgovPartition = partition{ "sms": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4925,6 +8116,18 @@ var awsusgovPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4932,25 +8135,67 @@ var awsusgovPartition = partition{ "sns": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "sqs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "ssm": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "ssm-facade-fips-us-gov-east-1": endpoint{ + Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "ssm-facade-fips-us-gov-west-1": endpoint{ + Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4958,6 +8203,18 @@ var awsusgovPartition = partition{ "states": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4965,6 +8222,13 @@ var awsusgovPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4995,14 +8259,54 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "swf": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "tagging": 
service{ @@ -5012,6 +8316,27 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "translate": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -5029,7 +8354,18 @@ var awsusgovPartition = partition{ "waf-regional": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "workspaces": service{ @@ -5038,6 +8374,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "xray": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, }, } @@ -5078,13 +8421,21 @@ var awsisoPartition = partition{ }, }, }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "us-iso-east-1": endpoint{}, @@ -5116,6 +8467,14 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -5137,6 +8496,12 @@ var awsisoPartition = partition{ "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, "us-iso-east-1": endpoint{}, }, }, @@ -5199,6 +8564,12 @@ var awsisoPartition = partition{ }, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ @@ -5293,6 +8664,12 @@ var awsisoPartition = partition{ }, }, }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3v4"}, @@ -5406,11 +8783,7 @@ var awsisobPartition = partition{ Services: services{ "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "us-isob-east-1": endpoint{}, @@ -5451,6 +8824,12 @@ var awsisobPartition = partition{ "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, "us-isob-east-1": endpoint{}, }, }, @@ -5550,6 +8929,12 @@ 
var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -5606,6 +8991,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "states": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c936be6c..ca956e5f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -3,6 +3,7 @@ package endpoints import ( "fmt" "regexp" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -46,6 +47,108 @@ type Options struct { // // This option is ignored if StrictMatching is enabled. ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. 
+ RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } } // Set combines all of the option functions together. @@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) { o.ResolveUnknownService = true } +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -194,7 +303,7 @@ func (p Partition) ID() string { return p.id } // require the provided service and region to be known by the partition. // If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned may look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage // of new regions and services expansions. // @@ -208,7 +317,7 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // Regions returns a map of Regions indexed by their ID. This is useful for // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} + rs := make(map[string]Region, len(p.p.Regions)) for id, r := range p.p.Regions { rs[id] = Region{ id: id, @@ -223,7 +332,7 @@ func (p Partition) Regions() map[string]Region { // Services returns a map of Service indexed by their ID. This is useful for // enumerating over the services in a partition. func (p Partition) Services() map[string]Service { - ss := map[string]Service{} + ss := make(map[string]Service, len(p.p.Services)) for id := range p.p.Services { ss[id] = Service{ id: id, @@ -310,7 +419,7 @@ func (s Service) Regions() map[string]Region { // A region is the AWS region the service exists in. Whereas a Endpoint is // an URL that can be resolved to a instance of a service. func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} + es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ id: id, @@ -350,6 +459,9 @@ type ResolvedEndpoint struct { // The endpoint URL URL string + // The endpoint partition + PartitionID string + // The region that should be used for signing requests. 
SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 000000000..df75e899a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 523ad79ac..eb2ac83c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -75,24 +75,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool) return p.RegionRegex.MatchString(region) } +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { var opt Options opt.Set(opts...) s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. 
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) } defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil } func serviceList(ss services) []string { @@ -201,7 +233,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -209,11 +241,23 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op merged.mergeIn(e) e = merged - hostname := e.Hostname + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname // Offset the hostname for dualstack if enabled if opts.UseDualStack && e.HasDualStack == boxedTrue { hostname = e.DualStackHostname + region = signingRegion } u := strings.Replace(hostname, "{service}", service, 1) @@ -223,20 +267,9 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - return ResolvedEndpoint{ URL: u, + PartitionID: partitionID, SigningRegion: signingRegion, SigningName: signingName, SigningNameDerived: signingNameDerived, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 185b07318..e819ab6c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -10,6 +10,7 @@ import ( type Handlers struct { Validate HandlerList Build HandlerList + BuildStream HandlerList Sign HandlerList Send HandlerList ValidateResponse HandlerList @@ -28,6 +29,7 @@ func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), Sign: h.Sign.copy(), Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), @@ -46,6 +48,7 @@ func (h *Handlers) Copy() Handlers { func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() + h.BuildStream.Clear() h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() @@ -67,6 +70,9 @@ func (h 
*Handlers) IsEmpty() bool { if h.Build.Len() != 0 { return false } + if h.BuildStream.Len() != 0 { + return false + } if h.Send.Len() != 0 { return false } @@ -320,3 +326,18 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { AddToUserAgent(r, s) } } + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 8e332cce6..d597c6ead 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -36,6 +36,10 @@ const ( // API request that was canceled. Requests given a aws.Context may // return this error when canceled. CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" ) // A Request is the service request to be made. @@ -51,6 +55,7 @@ type Request struct { HTTPRequest *http.Request HTTPResponse *http.Response Body io.ReadSeeker + streamingBody io.ReadCloser BodyStart int64 // offset from beginning of Body that the request body starts Params interface{} Error error @@ -99,8 +104,12 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// New returns a new Request pointer for the service API -// operation and parameters. +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. // // Params is any value of input parameters to be the request payload. // Data is pointer value to an object which the request's response @@ -108,6 +117,10 @@ type Operation struct { func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + if retryer == nil { + retryer = noOpRetryer{} + } + method := operation.HTTPMethod if method == "" { method = "POST" @@ -122,8 +135,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } - SanitizeHostForHeader(httpReq) - r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -287,6 +298,13 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.ResetBody() } +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + // Presign returns the request's signed URL. Error will be returned // if the signing fails. The expire parameter is only used for presigned Amazon // S3 API requests. 
All other AWS services will use a fixed expiration @@ -406,11 +424,17 @@ func (r *Request) Sign() error { return r.Error } + SanitizeHostForHeader(r.HTTPRequest) + r.Handlers.Sign.Run(r) return r.Error } func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + if r.safeBody != nil { r.safeBody.Close() } @@ -615,6 +639,10 @@ func getHost(r *http.Request) string { return r.Host } + if r.URL == nil { + return "" + } + return r.URL.Host } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index f093fc542..64784e16f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -17,11 +17,13 @@ import ( // does the pagination between API operations, and Paginator defines the // configuration that will be used per page request. // -// cont := true -// for p.Next() && cont { +// for p.Next() { // data := p.Page().(*s3.ListObjectsOutput) // // process the page's data +// // ... +// // break out of loop to stop fetching additional pages // } +// // return p.Err() // // See service client API operation Pages methods for examples how the SDK will diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index e84084da5..752ae47f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -35,16 +35,47 @@ type Retryer interface { } // WithRetryer sets a Retryer value to the given Config returning the Config -// value for chaining. +// value for chaining. The value must not be nil. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } cfg.Retryer = retryer return cfg + +} + +// noOpRetryer is a internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 } // retryableCodes is a collection of service response codes which are retry-able // without any further action. 
var retryableCodes = map[string]struct{}{ - "RequestError": {}, + ErrCodeRequestError: {}, "RequestTimeout": {}, ErrCodeResponseTimeout: {}, "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout @@ -52,6 +83,7 @@ var retryableCodes = map[string]struct{}{ var throttleCodes = map[string]struct{}{ "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API "Throttling": {}, "ThrottlingException": {}, "RequestLimitExceeded": {}, @@ -60,6 +92,7 @@ var throttleCodes = map[string]struct{}{ "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -145,8 +178,8 @@ func shouldRetryError(origErr error) bool { origErr := err.OrigErr() var shouldRetry bool if origErr != nil { - shouldRetry := shouldRetryError(origErr) - if err.Code() == "RequestError" && !shouldRetry { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { return false } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 7713ccfca..fe6dac1f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -3,6 +3,7 @@ package session import ( "fmt" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -47,10 +48,10 @@ func resolveCredentials(cfg *aws.Config, } // WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_IAM_ROLE_ARN' was not set. +// 'AWS_ROLE_ARN' was not set. var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_IAM_ROLE_ARN' was set but +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but // 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) @@ -206,7 +207,14 @@ func credsFromAssumeRole(cfg aws.Config, sharedCfg.RoleARN, func(opt *stscreds.AssumeRoleProvider) { opt.RoleSessionName = sharedCfg.RoleSessionName - opt.Duration = sessOpts.AssumeRoleDuration + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } // Assume role with external ID if len(sharedCfg.ExternalID) > 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 60a6f9ce2..c1e0e9c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -1,12 +1,15 @@ package session import ( + "fmt" "os" "strconv" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // EnvProviderName provides a name of the provider when config is loaded from environment. 
@@ -125,6 +128,26 @@ type envConfig struct { // // AWS_ROLE_SESSION_NAME=session_name RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool } var ( @@ -179,6 +202,15 @@ var ( roleSessionNameEnvKey = []string{ "AWS_ROLE_SESSION_NAME", } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -187,7 +219,7 @@ var ( // If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value // the shared SDK config will be loaded in addition to the SDK's specific // configuration values. -func loadEnvConfig() envConfig { +func loadEnvConfig() (envConfig, error) { enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) return envConfigLoad(enableSharedConfig) } @@ -198,11 +230,11 @@ func loadEnvConfig() envConfig { // Loads the shared configuration in addition to the SDK's specific configuration. // This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` // environment variable is set. 
-func loadSharedEnvConfig() envConfig { +func loadSharedEnvConfig() (envConfig, error) { return envConfigLoad(true) } -func envConfigLoad(enableSharedConfig bool) envConfig { +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg := envConfig{} cfg.EnableSharedConfig = enableSharedConfig @@ -264,12 +296,48 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - return cfg + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + return cfg, nil } func setFromEnvVal(dst *string, keys []string) { for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 7b0a942e2..0ff499605 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -73,7 +73,7 @@ type Session struct { // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment - envCfg := loadEnvConfig() + envCfg, envErr := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config @@ -93,17 +93,17 @@ func New(cfgs ...*aws.Config) *Session { // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + s.logDeprecatedNewSessionError(msg, err, cfgs) } return s } s := deprecatedNewSession(cfgs...) 
+ if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { if l := s.Config.Logger; l != nil { @@ -112,11 +112,8 @@ func New(cfgs ...*aws.Config) *Session { } else if csmCfg.Enabled { err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { - err = fmt.Errorf("failed to enable CSM, %v", err) - s.Config.Logger.Log("ERROR:", err.Error()) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) } } @@ -279,10 +276,17 @@ type Options struct { // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig + var err error if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } } else { - envCfg = loadEnvConfig() + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } } if len(opts.Profile) != 0 { @@ -550,6 +554,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + // Configure credentials if not already set by the user when creating the // Session. if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { @@ -560,9 +580,35 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.Credentials = creds } + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + return nil } +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -591,47 +637,67 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) 
- - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) - var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) } return client.Config{ Config: s.Config, Handlers: s.Handlers, + PartitionID: resolved.PartitionID, Endpoint: resolved.URL, SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -641,12 +707,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) 
var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -658,3 +721,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index d91ac93a5..680805a38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,9 +2,11 @@ package session import ( "fmt" + "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/internal/ini" ) @@ -15,12 +17,13 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional // CSM options csmEnabledKey = `csm_enabled` @@ -40,10 +43,19 @@ const ( // Web Identity Token File webIdentityTokenFileKey = `web_identity_token_file` // optional + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" ) // sharedConfig represents the configuration fields of the SDK config files. 
@@ -63,10 +75,11 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration SourceProfileName string SourceProfile *sharedConfig @@ -88,6 +101,24 @@ type sharedConfig struct { CSMHost string CSMPort string CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool } type sharedConfigFile struct { @@ -244,8 +275,30 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.RoleSessionName, section, roleSessionNameKey) updateString(&cfg.SourceProfileName, section, sourceProfileKey) updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -271,6 +324,8 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.CSMPort, section, csmPortKey) updateString(&cfg.CSMClientID, section, csmClientIDKey) + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + return nil } @@ -363,6 +418,15 @@ func updateString(dst *string, section ini.Section, key string) { *dst = section.String(key) } +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + // updateBoolPtr will only update the dst with the value in the section key, // key is present in the section. 
func updateBoolPtr(dst **bool, section ini.Section, key string) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go index 244c86da0..07ea799fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -1,8 +1,7 @@ package v4 import ( - "net/http" - "strings" + "github.com/aws/aws-sdk-go/internal/strings" ) // validator houses a set of rule needed for validation of a @@ -61,7 +60,7 @@ type patterns []string // been found func (p patterns) IsValid(value string) bool { for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + if strings.HasPrefixFold(value, pattern) { return true } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 000000000..f35fc860b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 000000000..fed5c859c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 000000000..02cbd97e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + 
"AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 8104793aa..d71f7b3f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -76,9 +76,14 @@ import ( ) const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + authHeaderPrefix = "AWS4-HMAC-SHA256" timeFormat = "20060102T150405Z" shortTimeFormat = "20060102" + awsV4Request = "aws4_request" // emptyStringSHA256 is a SHA256 of an empty string emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` @@ -87,9 +92,9 @@ const ( var ignoredHeaders = rules{ blacklist{ mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, }, }, } @@ -229,11 +234,9 @@ type signingCtx struct { DisableURIPathEscaping bool - credValues credentials.Value - isPresign bool - formattedTime string - formattedShortTime string - unsignedPayload bool + credValues credentials.Value + isPresign bool + unsignedPayload bool bodyDigest string signedHeaders string @@ -337,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi } var err error - ctx.credValues, err = v4.Credentials.Get() + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) if err != nil { return http.Header{}, err } @@ -532,39 +535,56 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildSignature() // depends on string to sign if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature } else { parts := []string{ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, + authHeaderSignatureElem + ctx.signature, } - ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) } return nil } -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} +func (ctx *signingCtx) buildTime() { if ctx.isPresign { duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) } } func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) if ctx.isPresign { ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) @@ -588,8 +608,7 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { var headers []string headers = append(headers, "host") for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { + if !r.IsValid(k) { continue // ignored header } if ctx.SignedHeaderVals == nil { @@ -653,19 +672,15 @@ func (ctx *signingCtx) buildCanonicalString() { func (ctx *signingCtx) buildStringToSign() { ctx.stringToSign = strings.Join([]string{ authHeaderPrefix, - ctx.formattedTime, + formatTime(ctx.Time), ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), }, "\n") } func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) ctx.signature = hex.EncodeToString(signature) } @@ -726,13 +741,13 @@ func (ctx *signingCtx) removePresign() { ctx.Query.Del("X-Amz-SignedHeaders") } -func makeHmac(key []byte, data []byte) []byte { +func hmacSHA256(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) hash.Write(data) return hash.Sum(nil) } -func makeSha256(data []byte) []byte { +func hashSHA256(data []byte) []byte { hash := sha256.New() hash.Write(data) return hash.Sum(nil) @@ -804,3 +819,28 @@ func stripExcessSpaces(vals []string) { vals[i] = string(buf[:m]) } } + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, 
[]byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 455091540..98751ee84 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -2,6 +2,7 @@ package aws import ( "io" + "strings" "sync" "github.com/aws/aws-sdk-go/internal/sdkio" @@ -205,3 +206,59 @@ func (b *WriteAtBuffer) Bytes() []byte { defer b.m.Unlock() return b.buf } + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d1548ebd8..c224e7692 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.3" +const SDKVersion = "1.32.1" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 000000000..876dcb3fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
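The MultiCloser and CopySeekableBody helpers added to aws/types.go above are small utilities used by the checksum handler later in this patch. A minimal sketch of both; the temp files are placeholders.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// CopySeekableBody copies the reader, then seeks it back to where it started.
	src := bytes.NewReader([]byte("hello world"))
	var dst bytes.Buffer
	n, err := aws.CopySeekableBody(&dst, src)
	if err != nil {
		panic(err)
	}
	fmt.Println(n, dst.String())

	// MultiCloser closes several io.Closers and aggregates any errors.
	f1, _ := ioutil.TempFile("", "a")
	f2, _ := ioutil.TempFile("", "b")
	closers := aws.MultiCloser{f1, f2}
	if err := closers.Close(); err != nil {
		panic(err)
	}
}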
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. +var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index e56dcee2f..cf9fad81e 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -162,7 +162,7 @@ loop: if len(tokens) == 0 { break loop } - + // if should skip is true, we skip the tokens until should skip is set to false. step = SkipTokenState } @@ -218,7 +218,7 @@ loop: // S -> equal_expr' expr_stmt' switch k.Kind { case ASTKindEqualExpr: - // assiging a value to some key + // assigning a value to some key k.AppendChild(newExpression(tok)) stack.Push(newExprStatement(k)) case ASTKindExpr: @@ -250,6 +250,13 @@ loop: if !runeCompare(tok.Raw(), openBrace) { return nil, NewParseError("expected '['") } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } stmt := newStatement() stack.Push(stmt) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go index 6bb696447..da7a4049c 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -22,24 +22,24 @@ func newSkipper() skipper { } func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). if s.shouldSkip && s.prevTok.Type() == TokenNL && tok.Type() != TokenWS { - s.Continue() return false } s.prevTok = tok - return s.shouldSkip } func (s *skipper) Skip() { s.shouldSkip = true - s.prevTok = emptyToken } func (s *skipper) Continue() { s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false s.prevTok = emptyToken } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 000000000..d008ae27c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000..14ad0c589 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. 
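A minimal sketch of the duplicate-suppression behavior described above. The vendored copy lives under internal/ and is only importable from within the SDK module, so this sketch uses golang.org/x/sync/singleflight, which exposes the same Do signature; the key and return value are arbitrary examples.

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	// Ten concurrent callers share one execution of the function for key "cfg";
	// the shared flag reports whether the result was handed to multiple callers.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err, shared := g.Do("cfg", func() (interface{}, error) {
				return "loaded once", nil
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}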
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 000000000..e045f38d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go index ecc7bf82f..151054971 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -101,7 +101,7 @@ func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { } headers.Set(h.Name, value) } - (*hs) = decodedHeaders(headers) + *hs = decodedHeaders(headers) return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go index 4b972b2d6..474339391 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -21,10 +21,24 @@ type Decoder struct { // NewDecoder initializes and returns a Decoder for decoding event // stream messages from the reader provided. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ r: r, } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } } // Decode attempts to decode a single message from the event stream reader. @@ -40,6 +54,15 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { }() } + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. +func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { crc := crc32.New(crc32IEEETable) hashReader := io.TeeReader(reader, crc) @@ -72,12 +95,6 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { return m, nil } -// UseLogger specifies the Logger that that the decoder should use to log the -// message decode to. -func (d *Decoder) UseLogger(logger aws.Logger) { - d.logger = logger -} - func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { w := bytes.NewBuffer(nil) defer func() { logger.Log(w.String()) }() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go index 150a60981..ffade3bc0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -3,61 +3,107 @@ package eventstream import ( "bytes" "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" "hash" "hash/crc32" "io" + + "github.com/aws/aws-sdk-go/aws" ) // Encoder provides EventStream message encoding. type Encoder struct { - w io.Writer + w io.Writer + logger aws.Logger headersBuf *bytes.Buffer } // NewEncoder initializes and returns an Encoder to encode Event Stream // messages to an io.Writer. 
-func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ w: w, headersBuf: bytes.NewBuffer(nil), } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } } // Encode encodes a single EventStream message to the io.Writer the Encoder // was created with. An error is returned if writing the message fails. -func (e *Encoder) Encode(msg Message) error { +func (e *Encoder) Encode(msg Message) (err error) { e.headersBuf.Reset() - err := encodeHeaders(e.headersBuf, msg.Headers) - if err != nil { + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { return err } crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(e.w, crc) + hashWriter := io.MultiWriter(writer, crc) headersLen := uint32(e.headersBuf.Len()) payloadLen := uint32(len(msg.Payload)) - if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { return err } if headersLen > 0 { - if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { return err } } if payloadLen > 0 { - if _, err := hashWriter.Write(msg.Payload); err != nil { + if _, err = hashWriter.Write(msg.Payload); err != nil { return err } } msgCRC := crc.Sum32() - return binary.Write(e.w, binary.BigEndian, msgCRC) + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) } func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { @@ -86,7 +132,9 @@ func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) return nil } -func encodeHeaders(w io.Writer, headers Headers) error { +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
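The encoder now takes functional options, mirroring the decoder's DecodeWithLogger. A minimal sketch of encoding one message with logging enabled; the header name and payload are arbitrary examples.

package main

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	// The optional logger dumps each encoded message, as logMessageEncode shows above.
	enc := eventstream.NewEncoder(&buf, eventstream.EncodeWithLogger(aws.NewDefaultLogger()))

	msg := eventstream.Message{
		Headers: eventstream.Headers{
			{Name: ":event-type", Value: eventstream.StringValue("Stats")},
		},
		Payload: []byte(`{"bytesProcessed":123}`),
	}
	if err := enc.Encode(msg); err != nil {
		panic(err)
	}
}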
+func EncodeHeaders(w io.Writer, headers Headers) error { for _, h := range headers { hn := headerName{ Len: uint8(len(h.Name)), diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go index 5ea5a988b..34c2e89d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -1,6 +1,9 @@ package eventstreamapi -import "fmt" +import ( + "fmt" + "sync" +) type messageError struct { code string @@ -22,3 +25,53 @@ func (e messageError) Error() string { func (e messageError) OrigErr() error { return nil } + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. +func (e *OnceError) ErrorSet() <-chan struct{} { + return e.ch +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go similarity index 77% rename from vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go index 97937c8e5..bb8ea5da1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -2,9 +2,7 @@ package eventstreamapi import ( "fmt" - "io" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" ) @@ -15,27 +13,8 @@ type Unmarshaler interface { UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error } -// EventStream headers with specific meaning to async API functionality. -const ( - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". - - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) - // EventReader provides reading from the EventStream of an reader. 
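OnceError, added above, records only the first error and signals it by closing a channel. A minimal usage sketch; the error text is a placeholder.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

func main() {
	oe := eventstreamapi.NewOnceError()

	// The first non-nil error is stored and ErrorSet's channel is closed.
	go oe.SetError(errors.New("stream write failed"))

	<-oe.ErrorSet()
	fmt.Println(oe.Err())
}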
type EventReader struct { - reader io.ReadCloser decoder *eventstream.Decoder unmarshalerForEventType func(string) (Unmarshaler, error) @@ -47,27 +26,18 @@ type EventReader struct { // NewEventReader returns a EventReader built from the reader and unmarshaler // provided. Use ReadStream method to start reading from the EventStream. func NewEventReader( - reader io.ReadCloser, + decoder *eventstream.Decoder, payloadUnmarshaler protocol.PayloadUnmarshaler, unmarshalerForEventType func(string) (Unmarshaler, error), ) *EventReader { return &EventReader{ - reader: reader, - decoder: eventstream.NewDecoder(reader), + decoder: decoder, payloadUnmarshaler: payloadUnmarshaler, unmarshalerForEventType: unmarshalerForEventType, payloadBuf: make([]byte, 10*1024), } } -// UseLogger instructs the EventReader to use the logger and log level -// specified. -func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { - if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { - r.decoder.UseLogger(logger) - } -} - // ReadEvent attempts to read a message from the EventStream and return the // unmarshaled event value that the message is for. // @@ -95,8 +65,7 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) { case EventMessageType: return r.unmarshalEventMessage(msg) case ExceptionMessageType: - err = r.unmarshalEventException(msg) - return nil, err + return nil, r.unmarshalEventException(msg) case ErrorMessageType: return nil, r.unmarshalErrorMessage(msg) default: @@ -174,11 +143,6 @@ func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) return msgErr } -// Close closes the EventReader's EventStream reader. -func (r *EventReader) Close() error { - return r.reader.Close() -} - // GetHeaderString returns the value of the header as a string. If the header // is not set or the value is not a string an error will be returned. func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 000000000..e46b8acc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+ + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 000000000..3a7ba5cd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. +func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. 
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 000000000..433bb1630 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. 
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 000000000..10a3823df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,109 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling interface for event types to event stream +// messages. +type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. 
+func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} + +//type EventEncoder struct { +// encoder Encoder +// ppayloadMarshaler protocol.PayloadMarshaler +// eventTypeFor func(Marshaler) (string, error) +//} +// +//func (e EventEncoder) Encode(event Marshaler) error { +// msg, err := e.marshal(event) +// if err != nil { +// return err +// } +// +// return w.encoder.Encode(msg) +//} +// +//func (e EventEncoder) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// +//func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go index e3fc0766a..9f509d8f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -461,6 +461,11 @@ func (v *TimestampValue) decode(r io.Reader) error { return nil } +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + func timeFromEpochMilli(t int64) time.Time { secs := t / 1e3 msec := t % 1e3 diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go index 2dc012a66..25c9783cd 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -27,7 +27,7 @@ func (m *Message) rawMessage() (rawMessage, error) { if len(m.Headers) > 0 { var headers bytes.Buffer - if err := encodeHeaders(&headers, m.Headers); err != nil { + if err := EncodeHeaders(&headers, m.Headers); err != nil { return rawMessage{}, err } raw.Headers = headers.Bytes() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index ea0da79a5..5e9499699 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "reflect" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -45,10 +46,31 @@ func 
UnmarshalJSON(v interface{}, stream io.Reader) error { return err } - return unmarshalAny(reflect.ValueOf(v), out, "") + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") } -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. +func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { vtype := value.Type() if vtype.Kind() == reflect.Ptr { vtype = vtype.Elem() // check kind of actual element type @@ -80,17 +102,17 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) if field, ok := vtype.FieldByName("_"); ok { tag = field.Tag } - return unmarshalStruct(value, data, tag) + return u.unmarshalStruct(value, data, tag) case "list": - return unmarshalList(value, data, tag) + return u.unmarshalList(value, data, tag) case "map": - return unmarshalMap(value, data, tag) + return u.unmarshalMap(value, data, tag) default: - return unmarshalScalar(value, data, tag) + return u.unmarshalScalar(value, data, tag) } } -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -114,7 +136,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa // unwrap any payloads if payload := tag.Get("payload"); payload != "" { field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) } for i := 0; i < t.NumField(); i++ { @@ -128,9 +150,19 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa if locName := field.Tag.Get("locationName"); locName != "" { name = locName } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) + err := u.unmarshalAny(member, mapData[name], field.Tag) if err != nil { return err } @@ -138,7 +170,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa return nil } -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -153,7 +185,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) } for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") + err := u.unmarshalAny(value.Index(i), c, "") if err != nil { return err } @@ -162,7 +194,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) return nil } -func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -179,14 +211,14 @@ func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) kvalue := reflect.ValueOf(k) vvalue := reflect.New(value.Type().Elem()).Elem() - unmarshalAny(vvalue, v, "") + u.unmarshalAny(vvalue, v, "") value.SetMapIndex(kvalue, vvalue) } return nil } -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { switch d := data.(type) { case nil: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go index e21614a12..0ea0647a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -64,7 +64,7 @@ func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error metadata.ClientInfo{}, request.Handlers{}, nil, - &request.Operation{HTTPMethod: "GET"}, + &request.Operation{HTTPMethod: "PUT"}, v, nil, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 000000000..9d521dcb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. 
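RequireHTTPMinProtocol is attached as a request handler by operations that need a minimum HTTP version. A minimal sketch that drives the check directly against a fabricated response; the request here is hand-built purely for illustration rather than coming from a real service client.

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Fake a request whose response came back over HTTP/1.1.
	r := &request.Request{
		HTTPResponse: &http.Response{
			Proto:      "HTTP/1.1",
			ProtoMajor: 1,
			ProtoMinor: 1,
			StatusCode: 200,
		},
	}

	// Requiring HTTP/2.0 marks the request with a MinimumHTTPProtocolError.
	protocol.RequireHTTPMinProtocol{Major: 2}.Handler(r)
	fmt.Println(r.Error)
}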
+func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. +const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 0cb99eb57..d40346a77 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,7 +1,7 @@ // Package query provides serialization of AWS query requests, and responses. package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go import ( "net/url" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index f69c1efc9..9231e95d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -1,6 +1,6 @@ package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go import ( "encoding/xml" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 74e361e07..92f8b4d9a 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" "github.com/aws/aws-sdk-go/private/protocol" ) @@ -28,7 +29,9 @@ var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta func Unmarshal(r *request.Request) { if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } } } @@ -40,12 +43,21 @@ func UnmarshalMeta(r *request.Request) { r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") } if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) + if err := UnmarshalResponse(r.HTTPResponse, r.Data, 
aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } } } -func unmarshalBody(r *request.Request, v reflect.Value) { +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. +func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) @@ -57,35 +69,38 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + payload.Set(reflect.ValueOf(b)) + case *string: defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + default: switch payload.Type().String() { case "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, + return awserr.New(request.ErrCodeSerialization, "failed to read response body", err) - return } payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New(request.ErrCodeSerialization, + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -94,9 +109,11 @@ func unmarshalBody(r *request.Request, v reflect.Value) { } } } + + return nil } -func unmarshalLocationElements(r *request.Request, v reflect.Value) { +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { for i := 0; i < v.NumField(); i++ { m, field := v.Field(i), v.Type().Field(i) if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { @@ -111,26 +128,25 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { switch field.Tag.Get("location") { case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + unmarshalStatusCode(m, resp.StatusCode) + case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - break + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + case "headers": prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + err := unmarshalHeaderMap(m, 
resp.Header, prefix, lowerCaseHeaderMaps) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - break + awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } } } - if r.Error != nil { - return - } } + + return nil } func unmarshalStatusCode(v reflect.Value, statusCode int) { @@ -145,7 +161,7 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } } -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { if len(headers) == 0 { return nil } @@ -153,8 +169,12 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err case map[string]*string: // we only support string map value types out := map[string]*string{} for k, v := range headers { - k = http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } out[k[len(prefix):]] = &v[0] } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index 07a6187ea..b1ae36487 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -2,8 +2,8 @@ // requests and responses. package restxml -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go import ( "bytes" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go index da1a68111..f614ef898 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -19,3 +19,9 @@ func UnmarshalDiscardBody(r *request.Request) { io.Copy(ioutil.Discard, r.HTTPResponse.Body) r.HTTPResponse.Body.Close() } + +// ResponseMetadata provides the SDK response metadata attributes. +type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 000000000..cc857f136 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling errors API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. 
+type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. +func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index cf981fe95..09ad95159 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -8,6 +8,7 @@ import ( "reflect" "sort" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/private/protocol" @@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle return nil } + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + t := tag.Get("type") if t == "" { switch value.Kind() { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 7108d3800..107c053f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -64,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { // parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect // will be used to determine the type from r. 
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + rtype := r.Type() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() // check kind of actual element type diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index b4a4e8c4a..228cd3cfe 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "sync" - "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" @@ -15,11 +14,13 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) const opAbortMultipartUpload = "AbortMultipartUpload" @@ -66,11 +67,31 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// Aborts a multipart upload. +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. // // To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. +// the part storage, you should call the ListParts operation and ensure that +// the parts list is empty. +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -151,6 +172,64 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // Completes a multipart upload by assembling previously uploaded parts. // +// You first initiate the multipart upload and then upload all parts using the +// UploadPart operation. After successfully uploading all relevant parts of +// an upload, you call this operation to complete the upload. Upon receiving +// this request, Amazon S3 concatenates all the parts in ascending order by +// part number to create a new object. In the Complete Multipart Upload request, +// you must provide the parts list. You must ensure that the parts list is complete. +// This operation concatenates the parts that you provide in the list. 
For each +// part in the list, you must provide the part number and the ETag value, returned +// after that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// GetBucketLifecycle has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -225,6 +304,153 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Creates a copy of an object that is already stored in Amazon S3. // +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic operation using +// this API. However, to copy an object greater than 5 GB, you must use the +// multipart upload Upload Part - Copy API. For more information, see Copy Object +// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). 
+// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. +// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// operation starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (https://aws.amazon.com/s3/pricing/). +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// Metadata +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific +// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). 
+// +// x-amz-copy-source-if Headers +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: +// +// * x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition evaluates to true +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// Encryption +// +// The source object that you are copying can be encrypted or unencrypted. The +// source object can be encrypted with server-side encryption using AWS managed +// encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption +// key. With server-side encryption, Amazon S3 encrypts your data as it writes +// it to disks in its data centers and decrypts the data when you access it. +// +// You can optionally use the appropriate encryption-related headers to request +// server-side encryption for the target object. You have the option to provide +// your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form +// of server-side encryption that was used to encrypt the source object. You +// can even request encryption if the source object was not encrypted. For more +// information about server-side encryption, see Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the ACL on the object. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// You can use the CopyObject operation to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. 
+// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see . +// +// The following operations are related to CopyObject: +// +// * PutObject +// +// * GetObject +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -235,7 +461,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" // The source object of the COPY operation is not in the active tier and is -// only stored in Amazon Glacier. +// only stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -303,7 +529,67 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // CreateBucket API operation for Amazon Simple Storage Service. // -// Creates a new bucket. +// Creates a new bucket. To create a bucket, you must register with Amazon S3 +// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests +// are never allowed to create buckets. By creating the bucket, you become the +// bucket owner. +// +// Not every string is an acceptable bucket name. For information on bucket +// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the Europe (Ireland) Region. For more information, see +// How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. 
+// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject +// +// * DeleteBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -318,6 +604,11 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // by all users of the system. Please select a different name and try again. // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all AWS Regions except in the North Virginia Region. +// For legacy compatibility, if you re-create an existing bucket that you already +// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the +// bucket access control lists (ACLs). // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { @@ -385,13 +676,154 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // CreateMultipartUpload API operation for Amazon Simple Storage Service. // -// Initiates a multipart upload and returns an upload ID. +// This operation initiates a multipart upload and returns an upload ID. This +// upload ID is used to associate all of the parts in the specific multipart +// upload. You specify this upload ID in each of your subsequent upload part +// requests (see UploadPart). 
You also include this upload ID in the final request +// to either complete or abort the multipart upload request. // -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort operation and Amazon S3 aborts the multipart upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (AWS Signature Version +// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stop charging you for storing them only after you either complete +// or abort a multipart upload. +// +// You can optionally request server-side encryption. For server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You can provide your own encryption key, +// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or +// Amazon S3-managed encryption keys. If you choose to provide your own encryption +// key, the request headers you provide in UploadPart) and UploadPartCopy) requests +// must match the headers you used in the request to initiate the upload by +// using CreateMultipartUpload. +// +// To perform a multipart upload with encryption using an AWS KMS CMK, the requester +// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, +// and kms:DescribeKey actions on the key. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before +// it completes the multipart upload. +// +// If your AWS Identity and Access Management (IAM) user or role is in the same +// AWS account as the AWS KMS CMK, then you must have these permissions on the +// key policy. 
If your IAM user or role belongs to a different account than +// the key, then you must have the permissions on both the key policy and your +// IAM user or role. +// +// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. For more information about server-side +// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key +// x-amz-server-side​-encryption​-customer-key-MD5 For more information +// about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see +// Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. 
These permissions +// are then added to the access control list (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -466,8 +898,14 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // DeleteBucket API operation for Amazon Simple Storage Service. // -// Deletes the bucket. All objects (including all object versions and Delete -// Markers) in the bucket must be deleted before the bucket itself can be deleted. +// Deletes the bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Related Resources +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -547,7 +985,20 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. 
The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -622,7 +1073,20 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // DeleteBucketCors API operation for Amazon Simple Storage Service. // -// Deletes the CORS configuration information set for the bucket. +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources: +// +// * +// +// * RESTOPTIONSobject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -697,7 +1161,23 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// Deletes the server-side encryption configuration from the bucket. +// This implementation of the DELETE operation removes default encryption from +// the bucket. For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketEncryption +// +// * GetBucketEncryption // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -775,6 +1255,23 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // Deletes an inventory configuration (identified by the inventory ID) from // the bucket. // +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -848,7 +1345,27 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // DeleteBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deletes the lifecycle configuration from the bucket. +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketLifecycleConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -923,8 +1440,28 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // -// Deletes a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. 
The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * GetBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -999,7 +1536,29 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// Deletes the policy from the bucket. +// This implementation of the DELETE operation uses the policy subresource to +// delete the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the DeleteBucketPolicy permissions on the specified bucket and +// belong to the bucket owner's account to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to DeleteBucketPolicy +// +// * CreateBucket +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1074,10 +1633,26 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // DeleteBucketReplication API operation for Amazon Simple Storage Service. // -// Deletes the replication configuration from the bucket. For information about -// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. 
For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// The following operations are related to DeleteBucketReplication: +// +// * PutBucketReplication +// +// * GetBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1153,6 +1728,16 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // Deletes the tags from the bucket. // +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging +// +// * PutBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1226,7 +1811,26 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration from the bucket. +// This operation removes the website configuration for a bucket. Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite +// +// * PutBucketWebsite // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1304,6 +1908,29 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // marker, which becomes the latest version of the object. If there isn't a // null version, Amazon S3 does not remove any objects. // +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. 
If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). +// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// You can delete objects by explicitly calling the DELETE Object API or configure +// its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for +// you. If you want to block users or accounts from removing or deleting objects +// from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, +// and s3:PutLifeCycleConfiguration actions. +// +// The following operation is related to DeleteObject: +// +// * PutObject +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1376,7 +2003,21 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // DeleteObjectTagging API operation for Amazon Simple Storage Service. // -// Removes the tag-set from an existing object. +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// To use this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * PutObjectTagging +// +// * GetObjectTagging // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1445,13 +2086,57 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque output = &DeleteObjectsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // DeleteObjects API operation for Amazon Simple Storage Service. // // This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success, or failure, in the response. 
Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion, the operation does not return any information about +// the delete in the response body. +// +// When performing this operation on an MFA Delete enabled bucket, that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. +// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1526,7 +2211,21 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) // DeletePublicAccessBlock API operation for Amazon Simple Storage Service. // -// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * GetBucketPolicyStatus // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1600,7 +2299,32 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// Returns the accelerate configuration of a bucket. +// This implementation of the GET operation uses the accelerate subresource +// to return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. 
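+//
+// As an illustrative sketch (the bucket name is a placeholder; svc is an *s3.S3
+// client), reading the current Transfer Acceleration state might look like:
+//
+//    out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err == nil {
+//        // Status is unset if acceleration has never been configured on the bucket.
+//        fmt.Println("acceleration state:", aws.StringValue(out.Status))
+//    }
+//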
+// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketAccelerateConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1674,7 +2398,15 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// Gets the access control policy for the bucket. +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1748,8 +2480,27 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// Gets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. 
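+//
+// A minimal sketch of fetching one analytics configuration by ID (the bucket name
+// and configuration ID are placeholders; svc is an *s3.S3 client):
+//
+//    out, err := svc.GetBucketAnalyticsConfiguration(&s3.GetBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//        Id:     aws.String("analytics-configuration-id"),
+//    })
+//    if err == nil && out.AnalyticsConfiguration != nil {
+//        fmt.Println("found configuration:", aws.StringValue(out.AnalyticsConfiguration.Id))
+//    }
+//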
+// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1823,7 +2574,20 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the CORS configuration for the bucket. +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors +// +// * DeleteBucketCors // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1897,7 +2661,21 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. // -// Returns the server-side encryption configuration of a bucket. +// Returns the default encryption configuration for an Amazon S3 bucket. For +// information about the Amazon S3 default encryption feature, see Amazon S3 +// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption +// +// * DeleteBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1971,8 +2749,25 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Returns an inventory configuration (identified by the inventory ID) from -// the bucket. +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
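+//
+// For illustration only (the bucket name and inventory ID are placeholders; svc is
+// an *s3.S3 client), fetching a single inventory configuration might look like:
+//
+//    out, err := svc.GetBucketInventoryConfiguration(&s3.GetBucketInventoryConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//        Id:     aws.String("inventory-configuration-id"),
+//    })
+//    if err == nil && out.InventoryConfiguration != nil {
+//        fmt.Println("inventory configuration:", aws.StringValue(out.InventoryConfiguration.Id))
+//    }
+//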
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// +// * PutBucketInventoryConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2051,7 +2846,34 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see GetBucketLifecycleConfiguration. +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration +// +// * PutBucketLifecycle +// +// * DeleteBucketLifecycle // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2129,7 +2951,37 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Returns the lifecycle configuration information set on the bucket. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are still using previous version +// of the lifecycle configuration, it works. 
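+//
+// A minimal sketch of reading the filter-based lifecycle configuration with this
+// SDK (the bucket name is a placeholder; svc is an *s3.S3 client):
+//
+//    out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err == nil {
+//        for _, rule := range out.Rules {
+//            fmt.Println(aws.StringValue(rule.ID), aws.StringValue(rule.Status))
+//        }
+//    }
+//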
For the earlier API description, +// see GetBucketLifecycle. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission, by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle +// +// * PutBucketLifecycle +// +// * DeleteBucketLifecycle // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2203,7 +3055,17 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // GetBucketLocation API operation for Amazon Simple Storage Service. // -// Returns the region the bucket resides in. +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket. +// +// To use this implementation of the operation, you must be the bucket owner. +// +// The following operations are related to GetBucketLocation: +// +// * GetObject +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2280,6 +3142,12 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // Returns the logging status of a bucket and the permissions users have to // view and modify that status. To use GET, you must be the bucket owner. // +// The following operations are related to GetBucketLogging: +// +// * CreateBucket +// +// * PutBucketLogging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2353,7 +3221,26 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // // Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
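+//
+// As a sketch only (the bucket name and metrics configuration ID are placeholders;
+// svc is an *s3.S3 client), retrieving one request-metrics configuration:
+//
+//    out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//        Id:     aws.String("metrics-configuration-id"),
+//    })
+//    if err == nil && out.MetricsConfiguration != nil {
+//        fmt.Println("metrics configuration:", aws.StringValue(out.MetricsConfiguration.Id))
+//    }
+//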
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2432,7 +3319,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketNotificationConfiguration operation. +// No longer used, see GetBucketNotificationConfiguration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2512,6 +3399,22 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. // +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2584,7 +3487,26 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // GetBucketPolicy API operation for Amazon Simple Storage Service. // -// Returns the policy of a specified bucket. +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. 
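+//
+// A minimal sketch (the bucket name is a placeholder; svc is an *s3.S3 client) of
+// reading the bucket policy, which the SDK returns as a JSON string:
+//
+//    out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Policy))
+//    }
+//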
If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2659,7 +3581,22 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re // GetBucketPolicyStatus API operation for Amazon Simple Storage Service. // // Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * DeletePublicAccessBlock // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2739,6 +3676,25 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // to all Amazon S3 systems. Therefore, a get request soon after put or delete // can return a wrong result. // +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires permissions for the s3:GetReplicationConfiguration +// action. For more information about permissions, see Using Bucket Policies +// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. +// +// For information about GetBucketReplication errors, see ReplicationErrorCodeList +// +// The following operations are related to GetBucketReplication: +// +// * PutBucketReplication +// +// * DeleteBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2811,7 +3767,13 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // GetBucketRequestPayment API operation for Amazon Simple Storage Service. 
// -// Returns the request payment configuration of a bucket. +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2887,6 +3849,21 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // Returns the tag set associated with the bucket. // +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging +// +// * DeleteBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2961,6 +3938,20 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // Returns the versioning state of a bucket. // +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject +// +// * PutObject +// +// * DeleteObject +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3033,7 +4024,21 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // GetBucketWebsite API operation for Amazon Simple Storage Service. // -// Returns the website configuration for a bucket. +// Returns the website configuration for a bucket. To host website on Amazon +// S3, you can configure a bucket as website by adding a website configuration. +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This GET operation requires the S3:GetBucketWebsite permission. By default, +// only the bucket owner can read the bucket website configuration. However, +// bucket owners can allow other users to read the website configuration by +// writing a bucket policy granting them the S3:GetBucketWebsite permission. +// +// The following operations are related to DeleteBucketWebsite: +// +// * DeleteBucketWebsite +// +// * PutBucketWebsite // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3107,7 +4112,130 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // GetObject API operation for Amazon Simple Storage Service. 
// -// Retrieves objects from Amazon S3. +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system. You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For +// more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// +// To distribute large files to many people, you can save bandwidth costs by +// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// For more information about returning the ACL of an object, see GetObjectAcl. +// +// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE +// storage classes, before you can retrieve the object you must first restore +// a copy using . Otherwise, this operation returns an InvalidObjectStateError +// error. For information about restoring archived objects, see Restoring Archived +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you GET the object, you must use the following headers: +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging +// action), the response also returns the x-amz-tagging-count header that provides +// the count of number of tags associated with the object. You can use GetObjectTagging +// to retrieve the tag set associated with an object. +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
+// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will +// return an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 will return +// an HTTP status code 403 ("access denied") error. +// +// Versioning +// +// By default, the GET operation returns the current version of an object. To +// return a different version, use the versionId subresource. +// +// If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. +// +// For more information about versioning, see PutBucketVersioning. +// +// Overriding Response Header Values +// +// There are times when you want to override certain response header values +// in a GET response. For example, you might override the Content-Disposition +// response header value in your GET request. +// +// You can override values for a set of response headers using the following +// query parameters. These response header values are sent only on a successful +// request, that is, when status code 200 OK is returned. The set of headers +// you can override using these parameters is a subset of the headers that Amazon +// S3 accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values +// in the GET response, you use the following request parameters. +// +// You must sign the request, either using an Authorization header or a presigned +// URL, when using these parameters. They cannot be used with an unsigned (anonymous) +// request. +// +// * response-content-type +// +// * response-content-language +// +// * response-expires +// +// * response-cache-control +// +// * response-content-disposition +// +// * response-content-encoding +// +// Additional Considerations about Request Headers +// +// If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// +// If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since +// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// The following operations are related to GetObject: +// +// * ListBuckets +// +// * GetObjectAcl // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3186,7 +4314,21 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // GetObjectAcl API operation for Amazon Simple Storage Service. // -// Returns the access control list (ACL) of an object. +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. 
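+//
+// A minimal sketch of reading the current version's ACL (the bucket and key are
+// placeholders; svc is an *s3.S3 client):
+//
+//    out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        for _, grant := range out.Grants {
+//            fmt.Println(aws.StringValue(grant.Permission))
+//        }
+//    }
+//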
+// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject +// +// * DeleteObject +// +// * PutObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3265,7 +4407,8 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Gets an object's current Legal Hold status. +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3339,9 +4482,10 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Gets the object lock configuration for a bucket. The rule specified in the -// object lock configuration will be applied by default to every new object -// placed in the specified bucket. +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3415,7 +4559,8 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // GetObjectRetention API operation for Amazon Simple Storage Service. // -// Retrieves an object's retention settings. +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3489,7 +4634,25 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // GetObjectTagging API operation for Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET operation returns information about current version +// of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following operation is related to GetObjectTagging: +// +// * PutObjectTagging // // Returns awserr.Error for service API and SDK errors. 
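+//
+// As a sketch only (the bucket and key are placeholders; svc is an *s3.S3 client
+// and the awserr package is imported), listing an object's tags and inspecting a
+// service error via the type assertion described below:
+//
+//    out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    } else if err == nil {
+//        for _, tag := range out.TagSet {
+//            fmt.Println(aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
+//        }
+//    }
+//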
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3563,7 +4726,19 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // GetObjectTorrent API operation for Amazon Simple Storage Service. // -// Return torrent files from a bucket. +// Return torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size and +// that are not encrypted using server-side encryption with customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// The following operation is related to GetObjectTorrent: +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3637,7 +4812,30 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3713,7 +4911,15 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. // // This operation is useful to determine if a bucket exists and you have permission -// to access it. +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. Otherwise, the operation might return responses +// such as 404 Not Found and 403 Forbidden. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
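+//
+// An illustrative existence check (the bucket name is a placeholder; svc is an
+// *s3.S3 client); a missing bucket or missing permission surfaces as an error:
+//
+//    _, err := svc.HeadBucket(&s3.HeadBucketInput{
+//        Bucket: aws.String("examplebucket"),
+//    })
+//    if err != nil {
+//        fmt.Println("bucket is not accessible:", err)
+//    }
+//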
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3796,6 +5002,63 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // object itself. This operation is useful if you're only interested in an object's // metadata. To use HEAD, you must have READ access to the object. // +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. +// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. 
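+//
+// A minimal sketch of retrieving object metadata without the body (the bucket and
+// key are placeholders; svc is an *s3.S3 client):
+//
+//    out, err := svc.HeadObject(&s3.HeadObjectInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(out.ContentLength), "bytes, last modified", aws.TimeValue(out.LastModified))
+//    }
+//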
+// +// The following operation is related to HeadObject: +// +// * GetObject +// // See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses // for more information on returned errors. // @@ -3871,7 +5134,33 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. // -// Lists the analytics configurations for the bucket. +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated +// is set to false. If there are more configurations to list, IsTruncated is +// set to true, and there will be a value in NextContinuationToken. You use +// the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * GetBucketAnalyticsConfiguration +// +// * DeleteBucketAnalyticsConfiguration +// +// * PutBucketAnalyticsConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3945,7 +5234,33 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. // -// Returns a list of inventory configurations for the bucket. +// Returns a list of inventory configurations for the bucket. You can have up +// to 1,000 analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
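+//
+// As a sketch of the continuation-token pagination described above (the bucket
+// name is a placeholder; svc is an *s3.S3 client):
+//
+//    var token *string
+//    for {
+//        out, err := svc.ListBucketInventoryConfigurations(&s3.ListBucketInventoryConfigurationsInput{
+//            Bucket:            aws.String("examplebucket"),
+//            ContinuationToken: token,
+//        })
+//        if err != nil {
+//            break
+//        }
+//        for _, cfg := range out.InventoryConfigurationList {
+//            fmt.Println(aws.StringValue(cfg.Id))
+//        }
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        token = out.NextContinuationToken
+//    }
+//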
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4019,7 +5334,34 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. // -// Lists the metrics configurations for the bucket. +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration +// +// * GetBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4173,7 +5515,40 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. +// This operation lists in-progress multipart uploads. 
An in-progress multipart +// upload is a multipart upload that has been initiated using the Initiate Multipart +// Upload request, but has not yet been completed or aborted. +// +// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// multipart uploads is the maximum number of uploads a response can include, +// which is also the default value. You can further limit the number of uploads +// in a response by specifying the max-uploads parameter in the response. If +// additional multipart uploads satisfy the list criteria, the response will +// contain an IsTruncated element with the value true. To list the additional +// multipart uploads, use the key-marker and upload-id-marker request parameters. +// +// In the response, the uploads are sorted by key. If your application has initiated +// more than one multipart upload using the same object key, then uploads in +// the response are first sorted by key. Additionally, uploads are sorted in +// ascending order within each key by the upload initiation time. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListMultipartUploads: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4246,10 +5621,12 @@ func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4303,7 +5680,24 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // ListObjectVersions API operation for Amazon Simple Storage Service. // -// Returns metadata about all of the versions of objects in a bucket. +// Returns metadata about all of the versions of objects in a bucket. You can +// also use request parameters as selection criteria to return metadata about +// a subset of all the object versions. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4376,10 +5770,12 @@ func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObje }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4433,9 +5829,27 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // ListObjects API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1000) of the objects in a bucket. You can use +// Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This API has been revised. We recommend that you use the newer version, ListObjectsV2, +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * CreateBucket +// +// * ListBuckets // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4513,10 +5927,12 @@ func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4570,10 +5986,34 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // ListObjectsV2 API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1000) of the objects in a bucket. You can use +// Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend -// you use this revised API for new application development. +// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// To use this operation in an AWS Identity and Access Management (IAM) policy, +// you must have permissions to perform the s3:ListBucket action. The bucket +// owner has this permission by default and can grant this permission to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// This section describes the latest revision of the API. We recommend that +// you use this revised API for application development. 
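+//
+// A minimal sketch using the SDK's built-in paginator for this operation (the
+// bucket name and prefix are placeholders; svc is an *s3.S3 client):
+//
+//    err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
+//        Bucket: aws.String("examplebucket"),
+//        Prefix: aws.String("photos/"),
+//    }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//        for _, obj := range page.Contents {
+//            fmt.Println(aws.StringValue(obj.Key))
+//        }
+//        return true // keep requesting pages
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }
+//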
For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects. +// +// To get a list of your buckets, see ListBuckets. +// +// The following operations are related to ListObjectsV2: +// +// * GetObject +// +// * PutObject +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4651,10 +6091,12 @@ func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2 }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4709,6 +6151,33 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // ListParts API operation for Amazon Simple Storage Service. // // Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload). This request +// returns a maximum of 1,000 uploaded parts. The default number of parts returned +// is 1,000 parts. You can restrict the number of parts returned by specifying +// the max-parts request parameter. If your multipart upload consists of more +// than 1,000 parts, the response returns an IsTruncated field with the value +// of true, and a NextPartNumberMarker element. In subsequent ListParts requests +// you can include the part-number-marker query string parameter and set its +// value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4781,10 +6250,12 @@ func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, f }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4833,7 +6304,41 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// Sets the accelerate configuration of an existing bucket. +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
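A sketch of the ListParts flow documented a little earlier, using the paging helper; bucket, key, and upload ID are placeholders (the upload ID comes from an earlier CreateMultipartUpload call).

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),    // placeholder
		Key:      aws.String("large-object.bin"),  // placeholder
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"), // placeholder
		MaxParts: aws.Int64(500),
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Printf("part %d etag=%s size=%d\n",
				aws.Int64Value(p.PartNumber), aws.StringValue(p.ETag), aws.Int64Value(p.Size))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}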
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration operation returns the transfer acceleration +// state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4903,12 +6408,101 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketAcl API operation for Amazon Simple Storage Service. // -// Sets the permissions on a bucket using access control lists (ACL). +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). 
+// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use the x-amz-acl header to +// set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control +// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-write +// header grants create, overwrite, and delete objects permission to LogDelivery +// group predefined by Amazon S3 and two AWS accounts identified by their +// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// id="111122223333", id="555566667777" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetObjectAcl // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4984,7 +6578,50 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // // Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. 
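A minimal sketch, not part of this patch, of the canned-ACL path described above for PutBucketAcl: the SDK sends the value as the x-amz-acl header, so no grant headers or ACL body may be combined with it. The bucket name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Set a canned ACL on the bucket; requires WRITE_ACP permission.
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	if err != nil {
		log.Fatal(err)
	}
}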
+// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5054,12 +6691,58 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketCors API operation for Amazon Simple Storage Service. // -// Sets the CORS configuration for a bucket. +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. 
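A sketch of the storage class analysis export described above for PutBucketAnalyticsConfiguration. Bucket names, the configuration ID, and the destination ARN are placeholders, and the destination bucket policy must already allow Amazon S3 to write the report.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"),          // placeholder
		Id:     aws.String("storage-class-analysis"),  // placeholder
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id: aws.String("storage-class-analysis"),
			StorageClassAnalysis: &s3.StorageClassAnalysis{
				DataExport: &s3.StorageClassAnalysisDataExport{
					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
					Destination: &s3.AnalyticsExportDestination{
						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::example-destination-bucket"), // placeholder
							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
						},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}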
For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketCors +// +// * DeleteBucketCors +// +// * RESTOPTIONSobject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5129,13 +6812,38 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// Creates a new server-side encryption configuration (or replaces an existing -// one, if present). +// This implementation of the PUT operation uses the encryption subresource +// to set the default encryption state of an existing bucket. +// +// This implementation of the PUT operation sets default encryption for a bucket +// using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS +// customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 +// default encryption feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// This operation requires AWS Signature Version 4. For more information, see +// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
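A sketch of the cors subresource described a little earlier: a single CORSRule allowing GET and PUT from one origin. Bucket and origin are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("http://www.example.com")}, // placeholder
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}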
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketEncryption +// +// * DeleteBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5210,8 +6918,54 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Adds an inventory configuration (identified by the inventory ID) from the -// bucket. +// This implementation of the PUT operation adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same AWS Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. 
+// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5286,12 +7040,64 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the PutBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration. +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). +// +// Related Resources +// +// * GetBucketLifecycle(Deprecated) +// +// * GetBucketLifecycleConfiguration +// +// * +// +// * By default, a resource owner—in this case, a bucket owner, which is +// the AWS account that created the bucket—can perform any of the operations. +// A resource owner can also grant others permission to perform the operation. 
+// For more information, see the following topics in the Amazon Simple Storage +// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5365,24 +7171,89 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version +// of the API supported filtering based only on an object key name prefix, which +// is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle. // -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLifecycleConfiguration for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration -func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) - return out, req.Send() +// Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. Each rule +// consists of the following: +// +// * Filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, or a combination +// of both. +// +// * Status whether the rule is in effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. 
+// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the AWS account that created +// it) can access the resource. The resource owner can optionally grant access +// permissions to others by writing an access policy. For this operation, a +// user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration +// +// * DeleteBucketLifecycle +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() } // PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of @@ -5441,15 +7312,62 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLogging API operation for Amazon Simple Storage Service. // // Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. To set the logging status of +// can view and modify the logging parameters. All logs are saved to buckets +// in the same AWS Region as the source bucket. To set the logging status of // a bucket, you must be the bucket owner. // +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use +// the Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. 
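A sketch of the lifecycle rules described above for PutBucketLifecycleConfiguration: one prefix-filtered rule that transitions objects to STANDARD_IA after 30 days and expires them after 365. Bucket, rule ID, and prefix are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("archive-logs"), // placeholder
				Status: aws.String(s3.ExpirationStatusEnabled),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30),
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}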
+// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By Email address: <>Grantees@email.com<> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable logging, you use LoggingEnabled and its children request elements. +// To disable logging, you use an empty BucketLoggingStatus request element: +// +// +// +// For more information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). +// +// For more information about creating a bucket, see CreateBucket. For more +// information about returning the logging status of a bucket, see GetBucketLogging. +// +// The following operations are related to PutBucketLogging: +// +// * PutObject +// +// * DeleteBucket +// +// * CreateBucket +// +// * GetBucketLogging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5524,7 +7442,33 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // // Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to PutBucketMetricsConfiguration: +// +// * DeleteBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// GetBucketLifecycle has the following special error: +// +// * Error code: TooManyConfigurations Description: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request // // Returns awserr.Error for service API and SDK errors. 
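A sketch of enabling server access logging as described above, using the LoggingEnabled element. Source and target bucket names are placeholders, and the target bucket must already grant the log delivery group permission to write.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-bucket"), // placeholder source bucket
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"), // placeholder
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// To disable logging, send an empty BucketLoggingStatus{} instead.
}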
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5599,6 +7543,10 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -5683,7 +7631,55 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. // -// Enables notifications of specified events for a bucket. +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// Using this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an +// event notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration. +// +// +// +// +// +// This operation replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon +// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon +// SQS) destination exists, and that the bucket owner has permission to publish +// to it by sending a test notification. In the case of AWS Lambda destinations, +// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission +// to invoke the function from the Amazon S3 bucket. For more information, see +// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with s3:PutBucketNotification permission. +// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. +// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following operation is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration // // Returns awserr.Error for service API and SDK errors. 
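A sketch of the notification configuration just described: replacing the bucket's configuration with a single SQS destination for object-created events. The queue ARN is a placeholder, and its access policy must already allow Amazon S3 to publish to it.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // placeholder
				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Sending an empty NotificationConfiguration{} removes all notifications.
}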
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5753,12 +7749,37 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R output = &PutBucketPolicyOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket +// +// * DeleteBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5828,15 +7849,66 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketReplication API operation for Amazon Simple Storage Service. // // Creates a replication configuration or replaces an existing one. For more -// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket where you want +// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to +// replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. 
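A sketch of applying a bucket policy with PutBucketPolicy as described above; the bucket name and the policy document are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Placeholder policy granting public read on objects in the bucket.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowPublicRead",
	    "Effect": "Allow",
	    "Principal": "*",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::example-bucket/*"
	  }]
	}`

	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Policy: aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
}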
To choose additional subsets +// of objects to replicate, add a rule for each subset. All rules must specify +// the same destination bucket. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see ReplicationErrorCodeList +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication +// +// * DeleteBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5905,6 +7977,10 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -5913,8 +7989,14 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // Sets the request payment configuration for a bucket. By default, the bucket // owner pays for downloads from the bucket. This configuration parameter enables // the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket +// +// * GetBucketRequestPayment // // Returns awserr.Error for service API and SDK errors. 
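A sketch of the replication configuration described above, using a Filter-based rule (which therefore also sets Priority and DeleteMarkerReplication). The IAM role and both bucket names are placeholders, and versioning must already be enabled on the source and destination buckets.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				ID:       aws.String("replicate-docs"), // placeholder
				Status:   aws.String(s3.ReplicationRuleStatusEnabled),
				Priority: aws.Int64(1),
				Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("docs/")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"), // placeholder
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}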
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5984,6 +8066,10 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -5991,6 +8077,47 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // Sets the tags for a bucket. // +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging +// +// * DeleteBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
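A sketch of the cost-allocation tagging just described. Keys and values are placeholders; note that PutBucketTagging replaces the bucket's entire tag set.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("billing-demo")}, // placeholders
				{Key: aws.String("owner"), Value: aws.String("data-team")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}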
@@ -6059,6 +8186,10 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6067,6 +8198,38 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // Sets the versioning state of an existing bucket. To set the versioning state, // you must be the bucket owner. // +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning request does not return a versioning state value. +// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetBucketVersioning +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6135,12 +8298,81 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketWebsite API operation for Amazon Simple Storage Service. // -// Set the website configuration for a bucket. +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT operation requires the S3:PutBucketWebsite permission. 
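A sketch of enabling versioning as described above (the MFA delete elements are omitted); the bucket name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Use s3.BucketVersioningStatusSuspended to suspend versioning later.
}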
By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon Simple Storage Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6214,7 +8446,70 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // PutObject API operation for Amazon Simple Storage Service. // -// Adds an object to a bucket. +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. +// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// The Content-MD5 header is required for any request to upload an object with +// a retention period configured using Amazon S3 Object Lock. For more information +// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side Encryption +// +// You can optionally request server-side encryption. 
With server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts the data when you access it. You have the option to provide +// your own encryption key or use AWS managed encryption keys. For more information, +// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// Access Control List (ACL)-Specific Request Headers +// +// You can use headers to grant ACL- based permissions. By default, all objects +// are private. Only the owner has full access control. When adding a new object, +// you can grant permissions to individual AWS accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the +// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the STANDARD storage class to store newly created +// objects. The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different storage class. +// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning. +// +// Related Resources +// +// * CopyObject +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6283,13 +8578,97 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request output = &PutObjectAclOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectAcl API operation for Amazon Simple Storage Service. // -// uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket +// Uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket. You must have WRITE_ACP permission +// to set the ACL of an object. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 Developer Guide. 
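A sketch of a simple PutObject call using the server-side encryption and storage class options discussed above; bucket, key, and body are placeholders.

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("example-bucket"),        // placeholder
		Key:                  aws.String("reports/2020/demo.txt"), // placeholder
		Body:                 strings.NewReader("hello, s3"),      // any io.ReadSeeker
		ContentType:          aws.String("text/plain"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
		StorageClass:         aws.String(s3.StorageClassStandardIa),
	})
	if err != nil {
		log.Fatal(err)
	}
}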
+// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use x-amz-acl header to set +// a canned ACL. These parameters map to the set of permissions that Amazon +// S3 supports in an ACL. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants list objects permission to the two AWS accounts identified +// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com", +// emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// +// Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. 
To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6363,6 +8742,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6370,6 +8753,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // // Applies a Legal Hold configuration to the specified object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6437,15 +8824,26 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Places an object lock configuration on the specified bucket. The rule specified -// in the object lock configuration will be applied by default to every new +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new // object placed in the specified bucket. // +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6513,6 +8911,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req output = &PutObjectRetentionOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6520,6 +8922,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // // Places an Object Retention configuration on an object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6587,12 +8993,53 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request output = &PutObjectTaggingOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectTagging API operation for Amazon Simple Storage Service. 
// -// Sets the supplied tag-set to an object that already exists in a bucket +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging. +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6662,13 +9109,39 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req output = &PutPublicAccessBlockOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutPublicAccessBlock API operation for Amazon Simple Storage Service. // // Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. 
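To make the tagging flow described above concrete, here is a minimal hedged sketch of a PutObjectTagging call, reusing the svc client and imports from the earlier PutObject sketch; the bucket, key, and tag values are hypothetical.

// Attach a small tag-set to an existing object (Amazon S3 allows at most 10 tags per object).
_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
	Bucket: aws.String("example-bucket"), // placeholder name
	Key:    aws.String("reports/2020/q1.csv"),
	Tagging: &s3.Tagging{
		TagSet: []*s3.Tag{
			{Key: aws.String("project"), Value: aws.String("alpha")},
			{Key: aws.String("confidentiality"), Value: aws.String("internal")},
		},
	},
})
if err != nil {
	// InvalidTagError, MalformedXMLError, and similar cases surface as awserr.Error values.
	log.Println(err)
}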
+// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock +// +// * GetBucketPolicyStatus +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6744,6 +9217,190 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restores an archived copy of an object back into Amazon S3 // +// This operation performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. For more information +// about the S3 structure in the request body, see the following: PutObject +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. 
You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. +// +// Restoring Archives +// +// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To +// access an archived object, you must first initiate a restore request. This +// restores a temporary copy of the archived object. In a restore request, you +// specify the number of days that you want the restored copy to exist. After +// the specified period, Amazon S3 deletes the temporary copy but the object +// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object +// was restored from. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// The time it takes restore jobs to finish depends on which storage class the +// object is being restored from and which data access tier you specify. +// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the GLACIER storage class when occasional urgent requests for +// a subset of archives are required. For all but the largest archived objects +// (250 MB+), data accessed using Expedited retrievals are typically made +// available within 1–5 minutes. Provisioned capacity ensures that retrieval +// capacity for Expedited retrievals is available when you need it. Expedited +// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE +// storage class. 
+// +// * Standard - S3 Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for the GLACIER +// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval +// option. S3 Standard retrievals typically complete within 3-5 hours from +// the GLACIER storage class and typically complete within 12 hours from +// the DEEP_ARCHIVE storage class. +// +// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval +// option, enabling you to retrieve large amounts, even petabytes, of data +// inexpensively in a day. Bulk retrievals typically complete within 5-12 +// hours from the GLACIER storage class and typically complete within 48 +// hours from the DEEP_ARCHIVE storage class. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. You upgrade the speed of an in-progress +// restoration by issuing another restore request to the same object, setting +// a new Tier request element. When issuing a request to upgrade the restore +// tier, you must choose a tier that is faster than the tier that the in-progress +// restore is using. You must not change any other parameters, such as the Days +// request element. For more information, see Upgrading the Speed of an In-Progress +// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. For more information, see Configuring +// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// After restoring an archived object, you can update the restoration period +// by reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there +// are no data transfer charges. You cannot update the restoration period when +// Amazon S3 is actively processing your current restore request for the object. +// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy +// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes +// the object in 3 days. For more information about lifecycle configuration, +// see PutBucketLifecycleConfiguration and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in Amazon Simple Storage Service Developer Guide. +// +// Responses +// +// A successful operation returns either the 200 OK or 202 Accepted status code. +// +// * If the object copy is not previously restored, then Amazon S3 returns +// 202 Accepted in the response. 
+// +// * If the object copy is previously restored, Amazon S3 returns 200 OK +// in the response. +// +// Special Errors +// +// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. +// (This error does not apply to SELECT type requests.) HTTP Status Code: +// 409 Conflict SOAP Fault Code Prefix: Client +// +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited +// retrievals are currently not available. Try again later. (Returned if +// there is insufficient capacity to process the Expedited request. This +// error applies only to Expedited retrievals and not to S3 Standard or Bulk +// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketNotificationConfiguration +// +// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6753,7 +9410,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier +// This operation is not allowed against this storage tier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -6816,20 +9473,103 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) + + es := newSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) - req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) return } // SelectObjectContent API operation for Amazon Simple Storage Service. // // This operation filters the contents of an Amazon S3 object based on a simple -// Structured Query Language (SQL) statement. In the request, along with the -// SQL expression, you must also specify a data serialization format (JSON or -// CSV) of the object. Amazon S3 uses this to parse object data into records, -// and returns only records that match the specified SQL expression. You must -// also specify the data serialization format for the response. +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. 
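As a small sketch of the restore flow described above (again reusing the svc client from the earlier example, with placeholder names), the following initiates a Standard-tier restore of an archived object for 10 days. It is an illustration under those assumptions, not a canonical recipe.

// Ask S3 to restore a temporary copy of an archived (GLACIER/DEEP_ARCHIVE) object.
_, err := svc.RestoreObject(&s3.RestoreObjectInput{
	Bucket: aws.String("example-archive-bucket"), // placeholder
	Key:    aws.String("logs/2015/app.log.gz"),
	RestoreRequest: &s3.RestoreRequest{
		Days: aws.Int64(10), // how long the restored copy should remain available
		GlacierJobParameters: &s3.GlacierJobParameters{
			Tier: aws.String(s3.TierStandard), // Expedited, Standard, or Bulk
		},
	},
})
if err != nil {
	// A RestoreAlreadyInProgress error means a restore is already running for this object.
	log.Println(err)
}

A later HeadObject call on the same key returns the x-amz-restore header, which can be polled to see when the temporary copy is ready.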
+// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more information about using SQL with Amazon S3 Select, see SQL Reference +// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Permissions +// +// You must have s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject. For more +// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon Simple Storage Service Developer Guide. For objects that +// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see RESTSelectObjectAppendix . +// +// GetObject Support +// +// The SelectObjectContent operation does not support the following GetObject +// functionality. For more information, see GetObject. +// +// * Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest$ScanRange in the request parameters), +// you cannot specify the range of bytes of an object to return. +// +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot +// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. 
+// For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// For a list of special errors for this operation, see SelectObjectContentErrorCodeList +// +// Related Resources +// +// * GetObject +// +// * GetBucketLifecycleConfiguration +// +// * PutBucketLifecycleConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6859,11 +9599,152 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject return out, req.Send() } -const opUploadPart = "UploadPart" +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. +type SelectObjectContentEventStream struct { -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value will be populated with the request's response once the request completes + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + outputReader io.ReadCloser + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +func newSelectObjectContentEventStream() *SelectObjectContentEventStream { + return &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } +} + +func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body +} + +func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *SelectObjectContentEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +// Events returns a channel to read events from. 
+// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +// +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6905,12 +9786,87 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // Uploads a part in a multipart upload. // +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. To upload a part from an existing object, you +// use the UploadPartCopy operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload) before you +// can upload any part. In response to your initiate request, Amazon S3 returns +// an upload ID, a unique identifier, that you must include in your upload part +// request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. Each +// part must be at least 5 MB in size, except the last part. 
There is no size +// limit on the last part of your multipart upload. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. If they do not match, Amazon S3 returns +// an error. +// // Note: After you initiate multipart upload and upload one or more parts, you // must either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. // +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon Simple Storage Service Developer Guide . +// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. You have the option of providing your own encryption +// key, or you can use the AWS managed encryption keys. If you choose to provide +// your own encryption key, the request headers you provide in the request must +// match the headers you used in the request to initiate the upload by using +// CreateMultipartUpload. For more information, go to Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload. +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6983,7 +9939,94 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // UploadPartCopy API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. 
+// Uploads a part by copying data from an existing object as data source. You +// specify the data source by adding the request header x-amz-copy-source in +// your request and a byte range by adding the request header x-amz-copy-source-range +// in your request. +// +// The minimum allowable part size for a multipart upload is 5 MB. For more +// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Instead of using an existing object as part data, you might use the UploadPart +// operation and provide data in your request. +// +// You must initiate a multipart upload before you can upload any part. In response +// to your initiate request. Amazon S3 returns a unique identifier, the upload +// ID, that you must include in your upload part request. +// +// For more information about using the UploadPartCopy operation, see the following: +// +// * For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about copying objects using a single atomic operation +// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about using server-side encryption with customer-provided +// encryption keys with the UploadPartCopy operation, see CopyObject and +// UploadPart. +// +// Note the following additional considerations about the request headers x-amz-copy-source-if-match, +// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and +// x-amz-copy-source-if-modified-since: +// +// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request as follows: x-amz-copy-source-if-match +// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since +// condition evaluates to false; Amazon S3 returns 200 OK and copies the +// data. +// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// Amazon S3 returns 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. 
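The following sketch (placeholder names, abbreviated error handling, and the same imports and client as the first example) shows roughly how UploadPart and UploadPartCopy fit into a multipart upload that is then completed; it illustrates the flow described above rather than prescribing it. The firstPartData variable is a hypothetical []byte holding the data for part 1.

bucket, key := "example-bucket", "big/assembled.bin"

// 1. Initiate the multipart upload and note the upload ID.
mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
	Bucket: aws.String(bucket),
	Key:    aws.String(key),
})
if err != nil {
	log.Fatal(err)
}

// 2a. Upload part 1 from data supplied in the request (>= 5 MB unless it is the last part).
p1, err := svc.UploadPart(&s3.UploadPartInput{
	Bucket:     aws.String(bucket),
	Key:        aws.String(key),
	UploadId:   mpu.UploadId,
	PartNumber: aws.Int64(1),
	Body:       bytes.NewReader(firstPartData), // hypothetical []byte variable
})
if err != nil {
	log.Fatal(err)
}

// 2b. Copy part 2 from an existing object; the SDK sends x-amz-copy-source and
// x-amz-copy-source-range for you.
p2, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
	Bucket:          aws.String(bucket),
	Key:             aws.String(key),
	UploadId:        mpu.UploadId,
	PartNumber:      aws.Int64(2),
	CopySource:      aws.String("example-source-bucket/existing-object"), // placeholder
	CopySourceRange: aws.String("bytes=0-5242879"),
})
if err != nil {
	log.Fatal(err)
}

// 3. Complete the upload, listing every part number with its ETag.
_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
	Bucket:   aws.String(bucket),
	Key:      aws.String(key),
	UploadId: mpu.UploadId,
	MultipartUpload: &s3.CompletedMultipartUpload{
		Parts: []*s3.CompletedPart{
			{PartNumber: aws.Int64(1), ETag: p1.ETag},
			{PartNumber: aws.Int64(2), ETag: p2.CopyPartResult.ETag},
		},
	},
})
if err != nil {
	log.Fatal(err)
}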
+// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7045,7 +10088,14 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI type AbortMultipartUploadInput struct { _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` - // Name of the bucket to which the multipart upload was initiated. + // The bucket name to which the upload was taking place. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -7055,10 +10105,11 @@ type AbortMultipartUploadInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID that identifies the multipart upload. 
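If the upload cannot be completed, the parts should be explicitly aborted so they stop accruing storage charges. A minimal hedged sketch, using the same placeholder names and client as the multipart example above:

// Abort the multipart upload identified by the upload ID; S3 then frees the stored parts.
_, err = svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
	Bucket:   aws.String(bucket),
	Key:      aws.String(key),
	UploadId: mpu.UploadId,
})
if err != nil {
	log.Println(err)
}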
@@ -7133,6 +10184,20 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -7335,9 +10400,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { // Specifies the configuration and any analyses for the analytics filter of // an Amazon S3 bucket. -// -// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) -// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -7456,6 +10518,9 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -7518,6 +10583,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } +// Contains information about where to publish the analytics results. type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` @@ -7526,8 +10592,11 @@ type AnalyticsS3BucketDestination struct { // Bucket is a required field Bucket *string `type:"string" required:"true"` - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. BucketAccountId *string `type:"string"` // Specifies the file format used when exporting data to Amazon S3. @@ -7596,6 +10665,8 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. type Bucket struct { _ struct{} `type:"structure"` @@ -7679,6 +10750,7 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } +// Container for logging status information. type BucketLoggingStatus struct { _ struct{} `type:"structure"` @@ -7727,7 +10799,8 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin type CORSConfiguration struct { _ struct{} `type:"structure"` - // A set of allowed origins and methods. + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` @@ -7859,7 +10932,8 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Describes how a CSV-formatted input object is formatted. 
+// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. type CSVInput struct { _ struct{} `type:"structure"` @@ -7868,24 +10942,45 @@ type CSVInput struct { // to TRUE may lower performance. AllowQuotedRecordDelimiter *bool `type:"boolean"` - // The single character used to indicate a row should be ignored when present - // at the start of a row. + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. Comments *string `type:"string"` - // The value used to separate individual fields in a record. + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // Describes the first line of input. Valid values: None, Ignore, Use. + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` - // Value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string `type:"string"` - // The single character used for escaping the quote character inside an already - // escaped value. + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". QuoteEscapeCharacter *string `type:"string"` - // The value used to separate individual records. + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -7941,24 +11036,33 @@ func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { return s } -// Describes how CSV-formatted results are formatted. +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. type CSVOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual fields in a record. + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // The value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". QuoteCharacter *string `type:"string"` - // Th single character used for escaping the quote character inside an already + // The single character used for escaping the quote character inside an already // escaped value. QuoteEscapeCharacter *string `type:"string"` - // Indicates whether or not all output fields should be quoted. 
+ // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. QuoteFields *string `type:"string" enum:"QuoteFields"` - // The value used to separate individual records. + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -8002,9 +11106,12 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { return s } +// Container for specifying the AWS Lambda notification configuration. type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. CloudFunction *string `type:"string"` // The bucket event for which to send notifications. @@ -8012,12 +11119,14 @@ type CloudFunctionConfiguration struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // Bucket events for which to send notifications. Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The role supporting the invocation of the Lambda function InvocationRole *string `type:"string"` } @@ -8061,9 +11170,15 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. type CommonPrefix struct { _ struct{} `type:"structure"` + // Container for the specified common prefix. Prefix *string `type:"string"` } @@ -8086,20 +11201,28 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { type CompleteMultipartUploadInput struct { _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The container for the multipart upload request information. MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // ID for the initiated multipart upload. + // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -8176,35 +11299,61 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` + // The name of the bucket that contains the newly created object. Bucket *string `type:"string"` - // Entity tag of the object. + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. ETag *string `type:"string"` // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + // The object key of the newly created object. Key *string `min:"1" type:"string"` + // The URI that identifies the newly created object. Location *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Version of the object. + // Version ID of the newly created object, in case the bucket has versioning + // turned on. 
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -8279,9 +11428,11 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar return s } +// The container for the completed multipart upload details. type CompletedMultipartUpload struct { _ struct{} `type:"structure"` + // Array of CompletedPart data types. Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } @@ -8301,6 +11452,7 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip return s } +// Details of the parts that were uploaded. type CompletedPart struct { _ struct{} `type:"structure"` @@ -8334,7 +11486,10 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } -// Specifies a condition that must be met for a redirect to apply. +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. type Condition struct { _ struct{} `type:"structure"` @@ -8403,12 +11558,19 @@ func (s *ContinuationEvent) UnmarshalEvent( return nil } +func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + type CopyObjectInput struct { _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the destination bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8448,7 +11610,8 @@ type CopyObjectInput struct { // Copies the object if it hasn't been modified since the specified time. CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt @@ -8457,8 +11620,8 @@ type CopyObjectInput struct { CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. @@ -8476,6 +11639,8 @@ type CopyObjectInput struct { // Allows grantee to write the ACL for the applicable object. 
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // The key of the destination object. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -8489,31 +11654,33 @@ type CopyObjectInput struct { // Specifies whether you want to apply a Legal Hold to the copied object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the copied object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want the copied object's object lock to expire. + // The date and time when you want the copied object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the AWS KMS Encryption Context to use for object encryption. 
The @@ -8523,12 +11690,14 @@ type CopyObjectInput struct { // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // or using SigV4. For information about configuring using any of the officially + // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request + // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. @@ -8536,7 +11705,7 @@ type CopyObjectInput struct { // The tag-set for the object destination object this value must be used in // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters + // Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // Specifies whether the object tag-set are copied from the source object or @@ -8827,11 +11996,27 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` + // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` + // Version of the copied object in the destination bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. @@ -8847,7 +12032,7 @@ type CopyObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` @@ -8856,12 +12041,13 @@ type CopyObjectOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. 
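For context, a minimal aws-sdk-go v1 sketch of the CopyObject call whose input and output shapes are documented above; the bucket names, key, and region are placeholders, and the SSE setting merely exercises the ServerSideEncryption field described in the comments.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// CopySource is "source-bucket/source-key" (URL-encoded); Bucket/Key name the destination.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:               aws.String("destination-bucket"),
		Key:                  aws.String("copied/object.txt"),
		CopySource:           aws.String("source-bucket/original/object.txt"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
	})
	if err != nil {
		log.Fatal(err)
	}
	// CopyObjectResult carries the new object's ETag and LastModified time.
	fmt.Println(aws.StringValue(out.CopyObjectResult.ETag))
}
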
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. @@ -8938,11 +12124,16 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } +// Container for all response elements. type CopyObjectResult struct { _ struct{} `type:"structure"` + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied object. ETag *string `type:"string"` + // Returns the date that the object was last modified. LastModified *time.Time `type:"timestamp"` } @@ -8968,6 +12159,7 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } +// Container for all response elements. type CopyPartResult struct { _ struct{} `type:"structure"` @@ -9000,11 +12192,12 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } +// The configuration information for the bucket. type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -9030,9 +12223,12 @@ type CreateBucketInput struct { // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // The name of the bucket to create. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The configuration information for the bucket. CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -9051,8 +12247,7 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Specifies whether you want Amazon S3 object lock to be enabled for the new - // bucket. + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } @@ -9146,6 +12341,9 @@ func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketI type CreateBucketOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. 
Virginia) Region (us-east-1), you do not need + // to specify the location. Location *string `location:"header" locationName:"Location" type:"string"` } @@ -9171,6 +12369,8 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the bucket to which to initiate the upload + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9206,6 +12406,8 @@ type CreateMultipartUploadInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Object key for which the multipart upload is to be initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -9215,31 +12417,33 @@ type CreateMultipartUploadInput struct { // Specifies whether you want to apply a Legal Hold to the uploaded object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // Specifies the object lock mode that you want to apply to the uploaded object. + // Specifies the Object Lock mode that you want to apply to the uploaded object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // Specifies the date and time when you want the object lock to expire. + // Specifies the date and time when you want the Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the AWS KMS Encryption Context to use for object encryption. The @@ -9247,20 +12451,22 @@ type CreateMultipartUploadInput struct { // encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for + // object encryption. All GET and PUT requests for an object protected by AWS + // KMS will fail if not made via SSL or using SigV4. For information about configuring + // using any of the officially supported AWS SDKs and AWS CLI, see Specifying + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object. The tag-set must be encoded as URL Query parameters + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object @@ -9477,17 +12683,47 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat return s } +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. 
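A minimal sketch of the multipart flow that the CreateMultipartUpload, UploadPart, and CompleteMultipartUpload shapes above support, using aws-sdk-go v1; the bucket, key, and payload are placeholders, and a real upload would send parts of at least 5 MiB, usually in parallel.

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("example-bucket"), aws.String("big/object.bin")

	// 1. Initiate the upload; the returned UploadId ties all parts together.
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload a part; Amazon S3 returns an ETag for each uploaded part.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete the upload by listing every part's ETag and part number.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
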
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Bucket *string `locationName:"Bucket" type:"string"` // Object key for which the multipart upload was initiated. @@ -9503,7 +12739,7 @@ type CreateMultipartUploadOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` @@ -9512,12 +12748,13 @@ type CreateMultipartUploadOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). 
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // ID for the initiated multipart upload. @@ -9607,7 +12844,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// The container element for specifying the default object lock retention settings +// The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. type DefaultRetention struct { _ struct{} `type:"structure"` @@ -9615,7 +12852,7 @@ type DefaultRetention struct { // The number of days that you want to specify for the default retention period. Days *int64 `type:"integer"` - // The default object lock retention mode you want to apply to new objects placed + // The default Object Lock retention mode you want to apply to new objects placed // in the specified bucket. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` @@ -9651,9 +12888,12 @@ func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { return s } +// Container for the objects to delete. type Delete struct { _ struct{} `type:"structure"` + // The objects to delete. + // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -9769,6 +13009,20 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9786,6 +13040,8 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { type DeleteBucketCorsInput struct { _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + // Specifies the bucket whose cors configuration is being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9829,6 +13085,20 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -9892,6 +13162,20 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -9909,6 +13193,8 @@ func (s DeleteBucketEncryptionOutput) GoString() string { type DeleteBucketInput struct { _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + // Specifies the bucket being deleted. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9952,11 +13238,25 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } -type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} - // The name of the bucket containing the inventory configuration to delete. - // +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10014,6 +13314,20 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI return s } +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -10031,6 +13345,8 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { type DeleteBucketLifecycleInput struct { _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + // The bucket name of the lifecycle to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10074,6 +13390,20 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -10150,6 +13480,20 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet return s } +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -10181,6 +13525,8 @@ func (s DeleteBucketOutput) GoString() string { type DeleteBucketPolicyInput struct { _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + // The bucket name. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10224,6 +13570,20 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -10243,9 +13603,6 @@ type DeleteBucketReplicationInput struct { // The bucket name. // - // It can take a while to propagate the deletion of a replication configuration - // to all Amazon S3 systems. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10289,6 +13646,20 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -10306,6 +13677,8 @@ func (s DeleteBucketReplicationOutput) GoString() string { type DeleteBucketTaggingInput struct { _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + // The bucket that has the tag set to be removed. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10349,6 +13722,20 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -10366,6 +13753,8 @@ func (s DeleteBucketTaggingOutput) GoString() string { type DeleteBucketWebsiteInput struct { _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + // The bucket name for which you want to remove the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10409,6 +13798,20 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -10423,6 +13826,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } +// Information about the delete marker. type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -10436,6 +13840,7 @@ type DeleteMarkerEntry struct { // Date and time the object was last modified. 
LastModified *time.Time `type:"timestamp"` + // The account that created the delete marker.> Owner *Owner `type:"structure"` // Version ID of an object. @@ -10482,11 +13887,21 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Specifies whether Amazon S3 should replicate delete makers. +// Specifies whether Amazon S3 replicates the delete markers. If you specify +// a Filter, you must specify this element. However, in the latest version of +// replication configuration (when Filter is specified), Amazon S3 doesn't replicate +// delete markers. Therefore, the DeleteMarkerReplication element can contain +// only Disabled. For an example configuration, see Basic Rule +// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, Amazon +// S3 handled replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` - // The status of the delete marker replication. + // Indicates whether to replicate delete markers. // // In the current implementation, Amazon S3 doesn't replicate the delete markers. // The status must be Disabled. @@ -10512,24 +13927,38 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { type DeleteObjectInput struct { _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + // The bucket name of the bucket containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Key name of the object to delete. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -10611,6 +14040,20 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -10658,9 +14101,20 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { type DeleteObjectTaggingInput struct { _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + // The bucket name containing the objects from which to remove the tags. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Name of the tag. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -10725,6 +14179,20 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -10751,25 +14219,39 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO type DeleteObjectsInput struct { _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + // The bucket name containing the objects to delete. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether you want to delete this object even if it has a Governance-type - // object lock in place. You must have sufficient permissions to perform this + // Object Lock in place. You must have sufficient permissions to perform this // operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Container for the request. + // // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -10844,11 +14326,29 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectsOutput struct { _ struct{} `type:"structure"` + // Container element for a successful delete. It identifies the object that + // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered. 
Errors []*Error `locationName:"Error" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the @@ -10932,6 +14432,20 @@ func (s *DeletePublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeletePublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -10946,15 +14460,24 @@ func (s DeletePublicAccessBlockOutput) GoString() string { return s.String() } +// Information about the deleted object. type DeletedObject struct { _ struct{} `type:"structure"` + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. DeleteMarker *bool `type:"boolean"` + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. DeleteMarkerVersionId *string `type:"string"` + // The name of the deleted object. Key *string `min:"1" type:"string"` + // The version ID of the deleted object. VersionId *string `type:"string"` } @@ -10993,7 +14516,7 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { } // Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket. +// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { _ struct{} `type:"structure"` @@ -11008,17 +14531,12 @@ type Destination struct { // direct Amazon S3 to change replica ownership to the AWS account that owns // the destination bucket by specifying the AccessControlTranslation property, // this is the account ID of the destination bucket owner. For more information, - // see Cross-Region Replication Additional Configuration: Change Replica Owner - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in - // the Amazon Simple Storage Service Developer Guide. + // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to - // store replicas of the object identified by the rule. - // - // A replication configuration can replicate objects to only one destination - // bucket. If there are multiple rules in your replication configuration, all - // rules must specify the same destination bucket. + // store the results. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -11027,9 +14545,19 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // The storage class to use when replicating objects, such as standard or reduced - // redundancy. By default, Amazon S3 uses the storage class of the source object - // to create the object replica. 
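Returning to the batch delete shapes above, a minimal aws-sdk-go v1 sketch of a DeleteObjects request; the bucket name, keys, and the access point ARN mentioned in the comment are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket may also be an access point ARN, for example
	// "arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap" (placeholder).
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/2020-06-01.gz")},
				{Key: aws.String("logs/2020-06-02.gz")},
			},
			Quiet: aws.Bool(true), // only report keys that failed to delete
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Each Error element names the object Amazon S3 could not delete and why.
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}
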
+ // A container specifying replication metrics-related settings enabling metrics + // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified + // together with a ReplicationTime block. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. // // For valid values, see the StorageClass element of the PUT Bucket replication // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) @@ -11058,6 +14586,16 @@ func (s *Destination) Validate() error { invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) } } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11096,19 +14634,30 @@ func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *De return s } +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Describes the server-side encryption that will be applied to the restore -// results. +// Contains the type of server-side encryption used. type Encryption struct { _ struct{} `type:"structure"` // The server-side encryption algorithm used when storing job results in Amazon - // S3 (e.g., AES256, aws:kms). + // S3 (for example, AES256, aws:kms). // // EncryptionType is a required field EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` @@ -11117,8 +14666,11 @@ type Encryption struct { // the encryption context for the restore results. KMSContext *string `type:"string"` - // If the encryption type is aws:kms, this optional value specifies the AWS - // KMS key ID to use for encryption of job results. + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed AWS KMS CMK to use for encryption of job results. + // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric + // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. KMSKeyId *string `type:"string" sensitive:"true"` } @@ -11168,8 +14720,12 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. - // Amazon S3 uses this key to encrypt replica objects. 
+ // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer + // master key (CMK) stored in AWS Key Management Service (KMS) for the destination + // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only + // supports symmetric customer managed CMKs. For more information, see Using + // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -11189,6 +14745,9 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig return s } +// A message that indicates the request is complete and no more messages will +// be sent. You should not assume that the request is complete until the client +// receives an EndEvent. type EndEvent struct { _ struct{} `locationName:"EndEvent" type:"structure"` } @@ -11215,15 +14774,380 @@ func (s *EndEvent) UnmarshalEvent( return nil } +func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. type Error struct { _ struct{} `type:"structure"` + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS + // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). 
Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. 
For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. 
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. 
HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. 
HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client Code *string `type:"string"` + // The error key. Key *string `min:"1" type:"string"` + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. Message *string `type:"string"` + // The version ID of the error. VersionId *string `type:"string"` } @@ -11261,33 +15185,73 @@ func (s *Error) SetVersionId(v string) *Error { return s } -type ErrorDocument struct { +// The error information. +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
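The Error documentation above stresses that these codes are meant to be detected and handled by programs, not shown to users. A minimal sketch of that pattern with this SDK (the function name, and the idea of treating a missing object as a non-error, are illustrative assumptions, not something the diff prescribes):

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// fetchIfPresent treats a missing bucket or key as a non-error for this caller
// and lets every other failure propagate, branching on the code carried by
// awserr.Error.
func fetchIfPresent(svc *s3.S3, bucket, key string) error {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchKey, s3.ErrCodeNoSuchBucket:
			return nil // nothing to fetch; not a failure here
		}
	}
	if err != nil {
		return err
	}
	defer out.Body.Close()
	// ... read out.Body here ...
	return nil
}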
+func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 Developer Guide. +type ExistingObjectReplication struct { _ struct{} `type:"structure"` - // The object key name to use when a 4XX class error occurs. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` } // String returns the string representation -func (s ErrorDocument) String() string { +func (s ExistingObjectReplication) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ErrorDocument) GoString() string { +func (s ExistingObjectReplication) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) +func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) } if invalidParams.Len() > 0 { @@ -11296,9 +15260,9 @@ func (s *ErrorDocument) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *ErrorDocument) SetKey(v string) *ErrorDocument { - s.Key = &v +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v return s } @@ -11388,6 +15352,20 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -11414,6 +15392,8 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA type GetBucketAclInput struct { _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + // Specifies the S3 bucket whose ACL is being requested. 
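The getEndpointARN/hasEndpointARN helpers added throughout this file are the plumbing that lets a caller pass an S3 access point ARN wherever a bucket name is accepted, as the doc comments on the various inputs describe. A minimal sketch, assuming the imports and svc client from the earlier sketch; the ARN, account ID, and key below are made-up examples:

// readViaAccessPoint passes an access point ARN in the Bucket field; the SDK
// detects it via hasEndpointARN and routes the request to the access point
// endpoint instead of a plain bucket hostname.
func readViaAccessPoint(svc *s3.S3) error {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("readme.txt"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()
	return nil
}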
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11457,12 +15437,27 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -11550,6 +15545,20 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti return s } +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -11576,6 +15585,8 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana type GetBucketCorsInput struct { _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + // The bucket name for which to get the cors configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11619,9 +15630,25 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. 
CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } @@ -11690,6 +15717,20 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` @@ -11775,6 +15816,20 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento return s } +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -11801,6 +15856,8 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv type GetBucketLifecycleConfigurationInput struct { _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11844,9 +15901,24 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } @@ -11869,6 +15941,8 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge type GetBucketLifecycleInput struct { _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11912,9 +15986,24 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. 
Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` } @@ -11937,6 +16026,8 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput type GetBucketLocationInput struct { _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + // The name of the bucket for which to get the location. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11980,9 +16071,26 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -12005,6 +16113,8 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca type GetBucketLoggingInput struct { _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + // The bucket name for which to get the logging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12048,6 +16158,20 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` @@ -12136,6 +16260,20 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo return s } +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -12162,7 +16300,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics type GetBucketNotificationConfigurationRequest struct { _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` - // Name of the bucket to get the notification configuration for. + // Name of the bucket for which to get the notification configuration. 
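As the GetBucketLocationOutput documentation above notes, buckets in us-east-1 report a null LocationConstraint. A minimal sketch of resolving the actual Region, assuming the imports and svc client from the first sketch; NormalizeBucketLocation is the helper the SDK provides (in recent versions) to map that empty value to "us-east-1":

// bucketRegion returns the Region a bucket lives in, normalizing the empty
// LocationConstraint that us-east-1 buckets return.
func bucketRegion(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	return s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)), nil
}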
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12207,9 +16345,25 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyInput struct { _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + // The bucket name for which to get the bucket policy. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12253,6 +16407,20 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -12324,6 +16492,20 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyStatusOutput struct { _ struct{} `type:"structure" payload:"PolicyStatus"` @@ -12350,6 +16532,8 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke type GetBucketReplicationInput struct { _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + // The bucket name for which to get the replication information. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12393,6 +16577,20 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -12420,6 +16618,8 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC type GetBucketRequestPaymentInput struct { _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + // The name of the bucket for which to get the payment request configuration + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12463,6 +16663,20 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -12489,6 +16703,8 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym type GetBucketTaggingInput struct { _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + // The name of the bucket for which to get the tagging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12532,9 +16748,25 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -12558,6 +16790,8 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { type GetBucketVersioningInput struct { _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + // The name of the bucket for which to get the versioning information. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12601,6 +16835,20 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -12638,6 +16886,8 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp type GetBucketWebsiteInput struct { _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + // The bucket name for which to get the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12681,17 +16931,34 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` + // The object key name of the website error document to use for 4XX class errors. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website (for example index.html). IndexDocument *IndexDocument `type:"structure"` // Specifies the redirect behavior of all requests to a website endpoint of // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -12732,16 +16999,28 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb type GetObjectAclInput struct { _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + // The bucket name that contains the object for which to get the ACL information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key of the object for which to get the ACL information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -12811,12 +17090,27 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` // If present, indicates that the requester was successfully charged for the @@ -12855,6 +17149,15 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { type GetObjectInput struct { _ struct{} `locationName:"GetObjectRequest" type:"structure"` + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12874,6 +17177,8 @@ type GetObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // Key of the object to get. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -12883,13 +17188,17 @@ type GetObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. 
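GetObjectAclOutput, documented above, carries the bucket owner plus a list of grants. A minimal sketch of walking that list, assuming the imports and svc client from the first sketch plus "fmt"; bucket and key are hypothetical:

// printObjectGrants lists who holds which permission on an object.
func printObjectGrants(svc *s3.S3, bucket, key string) error {
	acl, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	for _, g := range acl.Grants {
		if g.Grantee == nil {
			continue
		}
		// A grantee may be a canonical user, a group URI, or an email-based grantee.
		fmt.Printf("%s -> %s\n", aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
	}
	return nil
}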
Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response. @@ -12910,19 +17219,20 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object. @@ -13089,10 +17399,32 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLegalHoldInput struct { _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - // The bucket containing the object whose Legal Hold status you want to retrieve. + // The bucket name containing the object whose Legal Hold status you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
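The updated GetObjectInput documentation above points out that Range follows the HTTP byte-range syntax and that S3 serves a single range per request. A minimal sketch of a partial download, assuming the imports and svc client from the first sketch plus "io/ioutil"; the 1 MiB window is an arbitrary example:

// readFirstMiB downloads only the first 1 MiB of an object.
func readFirstMiB(svc *s3.S3, bucket, key string) ([]byte, error) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Range:  aws.String("bytes=0-1048575"),
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	return ioutil.ReadAll(out.Body)
}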
+ // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13102,10 +17434,11 @@ type GetObjectLegalHoldInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object whose Legal Hold status you want to retrieve. @@ -13175,6 +17508,20 @@ func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInpu return s } +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` @@ -13201,7 +17548,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje type GetObjectLockConfigurationInput struct { _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` - // The bucket whose object lock configuration you want to retrieve. + // The bucket whose Object Lock configuration you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13246,10 +17593,24 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLockConfigurationOutput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - // The specified bucket's object lock configuration. + // The specified bucket's Object Lock configuration. 
ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } @@ -13272,6 +17633,7 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. @@ -13305,11 +17667,11 @@ type GetObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -13321,6 +17683,10 @@ type GetObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -13333,15 +17699,17 @@ type GetObjectOutput struct { // returned if you have permission to view an object's legal hold status. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode currently in place for this object. + // The Object Lock mode currently in place for this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's object lock will expire. + // The date and time when this object's Object Lock will expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. 
ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the @@ -13358,18 +17726,21 @@ type GetObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The number of tags, if any, on the object. @@ -13583,7 +17954,15 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput type GetObjectRetentionInput struct { _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` - // The bucket containing the object whose retention settings you want to retrieve. + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13593,10 +17972,11 @@ type GetObjectRetentionInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID for the object whose retention settings you want to retrieve. @@ -13660,10 +18040,24 @@ func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionI return s } -// SetVersionId sets the VersionId field's value. -func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { - s.VersionId = &v - return s +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } type GetObjectRetentionOutput struct { @@ -13692,12 +18086,24 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje type GetObjectTaggingInput struct { _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + // The bucket name containing the object for which to get the tagging information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which to get the tagging information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The versionId of the object for which to get the tagging information. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -13758,12 +18164,29 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. 
+ // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + // The versionId of the object for which you got the tagging information. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -13792,16 +18215,22 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput type GetObjectTorrentInput struct { _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + // The name of the bucket containing the object for which to get the torrent + // files. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The object key for which to get the information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -13862,9 +18291,24 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` + // A Bencoded dictionary as defined by the BitTorrent specification Body io.ReadCloser `type:"blob"` // If present, indicates that the requester was successfully charged for the @@ -13943,6 +18387,20 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetPublicAccessBlockOutput struct { _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` @@ -13967,10 +18425,11 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public return s } +// Container for S3 Glacier job parameters. type GlacierJobParameters struct { _ struct{} `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. 
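GetObjectTaggingOutput, documented above, returns the tag set along with the version the tags were read from. A minimal sketch of flattening that tag set into a map, assuming the imports and svc client from the first sketch; bucket and key are hypothetical:

// objectTags returns the object's tag set as a plain map.
func objectTags(svc *s3.S3, bucket, key string) (map[string]string, error) {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	tags := make(map[string]string, len(out.TagSet))
	for _, t := range out.TagSet {
		tags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
	}
	return tags, nil
}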
// // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -14005,9 +18464,11 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } +// Container for grant information. type Grant struct { _ struct{} `type:"structure"` + // The person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. @@ -14051,6 +18512,7 @@ func (s *Grant) SetPermission(v string) *Grant { return s } +// Container for the person being granted permissions. type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -14058,6 +18520,29 @@ type Grantee struct { DisplayName *string `type:"string"` // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. EmailAddress *string `type:"string"` // The canonical user ID of the grantee. @@ -14128,6 +18613,8 @@ func (s *Grantee) SetURI(v string) *Grantee { type HeadBucketInput struct { _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -14171,6 +18658,20 @@ func (s *HeadBucketInput) getBucket() (v string) { return *s.Bucket } +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -14188,6 +18689,8 @@ func (s HeadBucketOutput) GoString() string { type HeadObjectInput struct { _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + // The name of the bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14207,6 +18710,8 @@ type HeadObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // The object key. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -14217,28 +18722,32 @@ type HeadObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. 
Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object. @@ -14369,9 +18878,24 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadObjectOutput struct { _ struct{} `type:"structure"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Specifies caching behavior along the request/reply chain. @@ -14399,11 +18923,11 @@ type HeadObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. 
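As an aside (not part of the patch): the RequestPayer member documented above is set by the caller when reading from a Requester Pays bucket. A minimal HeadObject sketch, assuming the aws-sdk-go v1 s3 package and hypothetical bucket/key names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:       aws.String("my-requester-pays-bucket"), // hypothetical
		Key:          aws.String("data/object.bin"),          // hypothetical
		RequestPayer: aws.String(s3.RequestPayerRequester),   // acknowledge the charge
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ETag:", aws.StringValue(out.ETag))
	fmt.Println("Content-Length:", aws.Int64Value(out.ContentLength))
	for k, v := range out.Metadata {
		fmt.Printf("x-amz-meta-%s: %s\n", k, aws.StringValue(v))
	}
}
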
ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -14415,6 +18939,10 @@ type HeadObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -14423,26 +18951,69 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // The Legal Hold status for the specified object. + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission. + // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode currently in place for this object. + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's object lock expires. + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and destination bucket where Amazon S3 stores object replicas. 
When you request + // an object (GetObject) or object metadata (HeadObject) from these buckets, + // Amazon S3 will return the x-amz-replication-status header in the response + // as follows: + // + // * If requesting an object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // + // * If requesting an object from the destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if the object + // in your request is a replica that Amazon S3 created. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time - // of the restored object copy. + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, @@ -14451,18 +19022,25 @@ type HeadObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. 
+ // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // Version of the object. @@ -14652,13 +19230,15 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } +// Container for the Suffix element. type IndexDocument struct { _ struct{} `type:"structure"` // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. + // endpoint (for example,if the suffix is index.html and you make a request + // to samplebucket/images/ the data that is returned will be for the object + // with the key name images/index.html) The suffix must not be empty and must + // not include a slash character. // // Suffix is a required field Suffix *string `type:"string" required:"true"` @@ -14693,6 +19273,7 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } +// Container element that identifies who initiated the multipart upload. type Initiator struct { _ struct{} `type:"structure"` @@ -14913,6 +19494,7 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } +// Specifies the inventory configuration for an Amazon S3 bucket. type InventoryDestination struct { _ struct{} `type:"structure"` @@ -14962,10 +19544,10 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin type InventoryEncryption struct { _ struct{} `type:"structure"` - // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` - // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } @@ -15006,6 +19588,8 @@ func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { return s } +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. 
type InventoryFilter struct { _ struct{} `type:"structure"` @@ -15044,13 +19628,19 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` - // The ID of the account that owns the destination bucket. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. AccountId *string `type:"string"` - // The Amazon resource name (ARN) of the bucket where inventory results will + // The Amazon Resource Name (ARN) of the bucket where inventory results will // be published. // // Bucket is a required field @@ -15137,6 +19727,7 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } +// Specifies the schedule for generating inventory results. type InventorySchedule struct { _ struct{} `type:"structure"` @@ -15175,6 +19766,7 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } +// Specifies JSON as object's input serialization format. type JSONInput struct { _ struct{} `type:"structure"` @@ -15198,10 +19790,12 @@ func (s *JSONInput) SetType(v string) *JSONInput { return s } +// Specifies JSON as request's output serialization format. type JSONOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual records in the output. + // The value used to separate individual records in the output. If no value + // is specified, Amazon S3 uses a newline character ('\n'). RecordDelimiter *string `type:"string"` } @@ -15225,7 +19819,7 @@ func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { type KeyFilter struct { _ struct{} `type:"structure"` - // A list of containers for the key value pair that defines the criteria for + // A list of containers for the key-value pair that defines the criteria for // the filter rule. FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } @@ -15323,9 +19917,12 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } +// Container for lifecycle rules. You can add as many as 1000 rules. type LifecycleConfiguration struct { _ struct{} `type:"structure"` + // Specifies lifecycle configuration rules for an Amazon S3 bucket. + // // Rules is a required field Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -15369,6 +19966,7 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } +// Container for the expiration for the lifecycle of the object. type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -15415,6 +20013,7 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } +// A lifecycle rule for individual objects in an Amazon S3 bucket. type LifecycleRule struct { _ struct{} `type:"structure"` @@ -15425,6 +20024,8 @@ type LifecycleRule struct { // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. 
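Illustrative sketch only (not part of the patch): a single LifecycleRule combining the Expiration and Transitions members described in the lifecycle doc comments above, applied with PutBucketLifecycleConfiguration. Bucket name, rule ID, and prefix are hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	rule := &s3.LifecycleRule{
		ID:     aws.String("archive-then-expire"),                    // hypothetical rule ID
		Status: aws.String(s3.ExpirationStatusEnabled),
		Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")}, // hypothetical prefix
		Transitions: []*s3.Transition{{
			Days:         aws.Int64(30),
			StorageClass: aws.String(s3.TransitionStorageClassGlacier),
		}},
		Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
	}
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{rule},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
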
Expiration *LifecycleExpiration `type:"structure"` // The Filter is used to identify objects that a Lifecycle Rule applies to. @@ -15441,6 +20042,11 @@ type LifecycleRule struct { // period in the object's lifetime. NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` // Prefix identifying one or more objects to which the rule applies. This is @@ -15455,6 +20061,7 @@ type LifecycleRule struct { // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an Amazon S3 object transitions to a specified storage class. Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } @@ -15546,6 +20153,7 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` + // Prefix identifying one or more objects to which the rule applies. Prefix *string `type:"string"` // All of these tags must exist in the object's tag set in order for the rule @@ -15718,13 +20326,28 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` // The list of analytics configurations for a bucket. AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // The ContinuationToken that represents where this request began. + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. ContinuationToken *string `type:"string"` // Indicates whether the returned list of analytics configurations is complete. @@ -15832,6 +20455,20 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -15842,8 +20479,9 @@ type ListBucketInventoryConfigurationsOutput struct { // The list of inventory configurations for a bucket. InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - // Indicates whether the returned list of inventory configurations is truncated - // in this response. 
A value of true indicates that the list is truncated. + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. IsTruncated *bool `type:"boolean"` // The marker used to continue this inventory configuration listing. Use the @@ -15946,6 +20584,20 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -16019,8 +20671,10 @@ func (s ListBucketsInput) GoString() string { type ListBucketsOutput struct { _ struct{} `type:"structure"` + // The list of buckets owned by the requestor. Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + // The owner of the buckets listed. Owner *Owner `type:"structure"` } @@ -16049,10 +20703,26 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { type ListMultipartUploadsInput struct { _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -16065,6 +20735,13 @@ type ListMultipartUploadsInput struct { // Together with upload-id-marker, this parameter specifies the multipart upload // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. 
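A paging sketch (not part of the patch) for the key-marker/upload-id-marker semantics documented above; ListMultipartUploadsPages carries the markers forward between pages. Bucket and prefix are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		Prefix: aws.String("uploads/"),  // hypothetical key prefix
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Printf("in-progress: key=%s uploadId=%s\n",
				aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paging; the markers are resent by the SDK
	})
	if err != nil {
		log.Fatal(err)
	}
}
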
KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -16073,12 +20750,16 @@ type ListMultipartUploadsInput struct { MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` // Lists in-progress uploads only for those keys that begin with the specified - // prefix. + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -16157,17 +20838,42 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType *string `type:"string" enum:"EncodingType"` // Indicates whether the returned list of multipart uploads is truncated. A @@ -16198,6 +20904,8 @@ type ListMultipartUploadsOutput struct { // Upload ID after which listing began. UploadIdMarker *string `type:"string"` + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } @@ -16293,10 +21001,23 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti type ListObjectVersionsInput struct { _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + // The bucket name that contains the objects. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -16310,11 +21031,19 @@ type ListObjectVersionsInput struct { // Specifies the key to start with when listing objects in a bucket. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. If additional keys satisfy the search criteria, + // but were not returned because max-keys was exceeded, the response contains + // true. To return the additional keys, see key-marker + // and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - // Limits the response to keys that begin with the specified prefix. + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Specifies the object version you want to start listing from. @@ -16396,42 +21125,81 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Container for an object that is a delete marker. DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. 
All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the // rest of the results. IsTruncated *bool `type:"boolean"` - // Marks the last Key returned in a truncated response. + // Marks the last key returned in a truncated response. KeyMarker *string `type:"string"` + // Specifies the maximum number of objects to return. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` - // Use this value for the key marker request parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. NextKeyMarker *string `type:"string"` - // Use this value for the next version id marker parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. NextVersionIdMarker *string `type:"string"` + // Selects objects that start with the value supplied by this parameter. Prefix *string `type:"string"` + // Marks the last version of the key returned in a truncated response. VersionIdMarker *string `type:"string"` + // Container for version information. Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } @@ -16526,6 +21294,8 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe type ListObjectsInput struct { _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + // The name of the bucket containing the objects. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16543,8 +21313,9 @@ type ListObjectsInput struct { // Specifies the key to start with when listing objects in a bucket. Marker *string `location:"querystring" locationName:"marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. 
+ // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -16631,26 +21402,65 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up in a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. IsTruncated *bool `type:"boolean"` + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. Marker *string `type:"string"` + // The maximum number of keys returned in the response body. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` // When response is truncated (the IsTruncated element value in the response @@ -16662,6 +21472,7 @@ type ListObjectsOutput struct { // subsequent request to get the next set of object keys. NextMarker *string `type:"string"` + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` } @@ -16738,14 +21549,21 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { type ListObjectsV2Input struct { _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - // Name of the bucket to list. + // Bucket name to list. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // ContinuationToken indicates Amazon S3 that the list is being continued on // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // key. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // A delimiter is a character you use to group keys. @@ -16756,11 +21574,12 @@ type ListObjectsV2Input struct { // The owner field is not present in listV2 by default, if you want to return // owner field with each key in the result then set the fetch owner field to - // true + // true. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -16772,7 +21591,7 @@ type ListObjectsV2Input struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // listing after this specified key. StartAfter can be any key in the bucket. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -16863,29 +21682,65 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsV2Output struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. 
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // If ContinuationToken was sent with the request, it is included in the response. ContinuationToken *string `type:"string"` - // A delimiter is a character you use to group keys. + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // Set to false if all of the results were returned. Set to true if more keys + // are available to return. If the number of results exceeds that specified + // by MaxKeys, all of the results might not be returned. IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will @@ -16893,24 +21748,31 @@ type ListObjectsV2Output struct { // result will include less than equals 50 keys KeyCount *int64 `type:"integer"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `type:"integer"` - // Name of the bucket to list. + // Bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string"` - // NextContinuationToken is sent when isTruncated is true which means there + // NextContinuationToken is sent when isTruncated is true, which means there // are more keys in the bucket that can be listed. The next list requests to // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken // is obfuscated and is not a real key NextContinuationToken *string `type:"string"` - // Limits the response to keys that begin with the specified prefix. + // Keys that begin with the indicated prefix. 
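A short sketch (not part of the patch) showing the Prefix/Delimiter/CommonPrefixes behavior documented for ListObjectsV2 above; ListObjectsV2Pages follows NextContinuationToken automatically. Bucket and prefix are hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket:    aws.String("my-bucket"), // hypothetical
		Prefix:    aws.String("notes/"),    // group keys under this prefix
		Delimiter: aws.String("/"),         // roll up "subdirectories" into CommonPrefixes
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, cp := range page.CommonPrefixes {
			fmt.Println("common prefix:", aws.StringValue(cp.Prefix))
		}
		for _, obj := range page.Contents {
			fmt.Printf("%s (%d bytes)\n", aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // continue; the SDK passes NextContinuationToken for us
	})
	if err != nil {
		log.Fatal(err)
	}
}
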
Prefix *string `type:"string"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // If StartAfter was sent with the request, it is included in the response. StartAfter *string `type:"string"` } @@ -16999,9 +21861,20 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { type ListPartsInput struct { _ struct{} `locationName:"ListPartsRequest" type:"structure"` + // Name of the bucket to which the parts are being uploaded. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -17012,10 +21885,11 @@ type ListPartsInput struct { // part numbers will be listed. PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -17102,23 +21976,51 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListPartsOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. 
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` - // Identifies who initiated the multipart upload. + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. Initiator *Initiator `type:"structure"` - // Indicates whether the returned list of parts is truncated. + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. IsTruncated *bool `type:"boolean"` // Object key for which the multipart upload was initiated. @@ -17132,18 +22034,26 @@ type ListPartsOutput struct { // in a subsequent request. NextPartNumberMarker *int64 `type:"integer"` + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. Owner *Owner `type:"structure"` - // Part number after which listing begins. + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. PartNumberMarker *int64 `type:"integer"` + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. Parts []*Part `locationName:"Part" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // The class of storage used to store the object. + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -17251,7 +22161,8 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Describes an S3 location that will receive the results of the restore request. +// Describes an Amazon S3 location that will receive the results of the restore +// request. type Location struct { _ struct{} `type:"structure"` @@ -17266,8 +22177,7 @@ type Location struct { // The canned ACL to apply to the restore results. 
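Returning to the ListParts members shown just above: a sketch (not part of the patch) that pages through the parts of an in-progress multipart upload. Bucket, key, and upload ID are hypothetical; the upload ID would normally come from CreateMultipartUpload.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	uploadID := "EXAMPLE-UPLOAD-ID" // hypothetical; returned by CreateMultipartUpload
	err := svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("my-bucket"),           // hypothetical
		Key:      aws.String("backups/archive.tar"), // hypothetical
		UploadId: aws.String(uploadID),
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Printf("part %d etag=%s size=%d\n",
				aws.Int64Value(p.PartNumber), aws.StringValue(p.ETag), aws.Int64Value(p.Size))
		}
		return true // continue until IsTruncated is false
	})
	if err != nil {
		log.Fatal(err)
	}
}
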
CannedACL *string `type:"string" enum:"ObjectCannedACL"` - // Describes the server-side encryption that will be applied to the restore - // results. + // Contains the type of server-side encryption used. Encryption *Encryption `type:"structure"` // The prefix that is prepended to the restore results for this request. @@ -17389,13 +22299,14 @@ type LoggingEnabled struct { // Specifies the bucket where you want Amazon S3 to store server access logs. // You can have your logs delivered to any bucket that you own, including the // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should + // to deliver their logs to the same target bucket. In this case, you should // choose a different TargetPrefix for each source bucket so that the delivered // log files can be distinguished by key. // // TargetBucket is a required field TargetBucket *string `type:"string" required:"true"` + // Container for granting information. TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` // A prefix for all log object keys. If you store log files from multiple Amazon @@ -17464,8 +22375,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { type MetadataEntry struct { _ struct{} `type:"structure"` + // Name of the Object. Name *string `type:"string"` + // Value of the Object. Value *string `type:"string"` } @@ -17491,6 +22404,65 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } +// A container specifying replication metrics-related settings enabling metrics +// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified +// together with a ReplicationTime block. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + // + // EventThreshold is a required field + EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.EventThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("EventThreshold")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. 
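The new Metrics type added just above enables replication metrics for S3 Replication Time Control. A construction sketch, not part of the patch; the Minutes member of ReplicationTimeValue and the 15-minute threshold are assumptions based on the S3 RTC default.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Assumed: ReplicationTimeValue carries the threshold in Minutes.
	m := &s3.Metrics{
		Status: aws.String(s3.MetricsStatusEnabled),
		EventThreshold: &s3.ReplicationTimeValue{
			Minutes: aws.Int64(15), // assumed S3 RTC threshold value
		},
	}
	if err := m.Validate(); err != nil { // both members are required
		log.Fatal(err)
	}
	fmt.Println(m.String())
}
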
type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -17604,6 +22576,9 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). type MetricsFilter struct { _ struct{} `type:"structure"` @@ -17667,6 +22642,7 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } +// Container for the MultipartUpload for the Amazon S3 object. type MultipartUpload struct { _ struct{} `type:"structure"` @@ -17679,6 +22655,7 @@ type MultipartUpload struct { // Key of the object for which the multipart upload was initiated. Key *string `min:"1" type:"string"` + // Specifies the owner of the object that is part of the multipart upload. Owner *Owner `type:"structure"` // The class of storage used to store the object. @@ -17778,8 +22755,8 @@ type NoncurrentVersionTransition struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` @@ -17898,10 +22875,17 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` + // Container for specifying the AWS Lambda notification configuration. CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } @@ -17959,17 +22943,25 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } +// An object consists of data and its descriptive metadata. type Object struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of the object. ETag reflects only changes to + // the contents of an object, not its metadata. ETag *string `type:"string"` + // The name that you assign to an object. You use the object key to retrieve + // the object. Key *string `min:"1" type:"string"` + // The date the Object was Last Modified LastModified *time.Time `type:"timestamp"` + // The owner of the object Owner *Owner `type:"structure"` + // Size in bytes of the object Size *int64 `type:"integer"` // The class of storage used to store the object. @@ -18022,6 +23014,7 @@ func (s *Object) SetStorageClass(v string) *Object { return s } +// Object Identifier is unique value to identify objects. 
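// A brief caller-side sketch of the MetricsFilter shapes described above: a
// prefix, a tag, or a conjunction of both via MetricsAndOperator. The Prefix,
// Tags, and And members are assumed from the surrounding SDK rather than shown
// in full in this hunk, and the aws and s3 packages are assumed to be imported
// by the caller.
//
//    // Metrics limited to objects under "logs/" that also carry team=data.
//    cfg := &s3.MetricsConfiguration{
//        Id: aws.String("logs-metrics"),
//        Filter: &s3.MetricsFilter{
//            And: &s3.MetricsAndOperator{
//                Prefix: aws.String("logs/"),
//                Tags: []*s3.Tag{
//                    {Key: aws.String("team"), Value: aws.String("data")},
//                },
//            },
//        },
//    }
//    _ = cfg
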
type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -18072,14 +23065,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// The container element for object lock configuration parameters. +// The container element for Object Lock configuration parameters. type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Indicates whether this bucket has an object lock configuration enabled. + // Indicates whether this bucket has an Object Lock configuration enabled. ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // The object lock rule in place for the specified object. + // The Object Lock rule in place for the specified object. Rule *ObjectLockRule `type:"structure"` } @@ -18136,7 +23129,7 @@ type ObjectLockRetention struct { // Indicates the Retention mode for the specified object. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - // The date on which this object lock retention expires. + // The date on which this Object Lock Retention will expire. RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } @@ -18162,7 +23155,7 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti return s } -// The container element for an object lock rule. +// The container element for an Object Lock rule. type ObjectLockRule struct { _ struct{} `type:"structure"` @@ -18187,9 +23180,11 @@ func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRul return s } +// The version of an object. type ObjectVersion struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of that version of the object. ETag *string `type:"string"` // Specifies whether the object is (true) or is not (false) the latest version @@ -18202,6 +23197,7 @@ type ObjectVersion struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // Specifies the owner of the object. Owner *Owner `type:"structure"` // Size in bytes of the object. @@ -18344,11 +23340,14 @@ func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { return s } +// Container for the owner's display name and ID. type Owner struct { _ struct{} `type:"structure"` + // Container for the display name of the owner. DisplayName *string `type:"string"` + // Container for the ID of the owner. ID *string `type:"string"` } @@ -18374,6 +23373,7 @@ func (s *Owner) SetID(v string) *Owner { return s } +// Container for Parquet. type ParquetInput struct { _ struct{} `type:"structure"` } @@ -18388,6 +23388,7 @@ func (s ParquetInput) GoString() string { return s.String() } +// Container for elements related to a part. type Part struct { _ struct{} `type:"structure"` @@ -18464,6 +23465,7 @@ func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { return s } +// This data type contains information about progress of an operation. type Progress struct { _ struct{} `type:"structure"` @@ -18505,6 +23507,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress { return s } +// This data type contains information about the progress event of an operation. type ProgressEvent struct { _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` @@ -18545,7 +23548,21 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } -// Specifies the Block Public Access configuration for an Amazon S3 bucket. 
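// A short caller-side sketch of an Object Lock bucket configuration built from
// the Object Lock types above. The DefaultRetention fields (Mode, Days) and the
// ObjectLockEnabledEnabled / ObjectLockRetentionModeGovernance constants are
// assumed from the surrounding SDK and are not shown in this hunk; aws and s3
// package imports are assumed on the caller side.
//
//    lockCfg := &s3.ObjectLockConfiguration{
//        ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
//        Rule: &s3.ObjectLockRule{
//            DefaultRetention: &s3.DefaultRetention{
//                Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
//                Days: aws.Int64(30),
//            },
//        },
//    }
//    _ = lockCfg
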
+func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -18558,6 +23575,8 @@ type PublicAccessBlockConfiguration struct { // // * PUT Object calls fail if the request includes a public ACL. // + // * PUT Bucket calls fail if the request includes a public ACL. + // // Enabling this setting doesn't affect existing policies or ACLs. BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` @@ -18624,7 +23643,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi type PutBucketAccelerateConfigurationInput struct { _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` - // Specifies the Accelerate Configuration you want to set for the bucket. + // Container for setting the transfer acceleration state. // // AccelerateConfiguration is a required field AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -18683,6 +23702,20 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -18706,6 +23739,8 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket to which to apply the ACL. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18812,6 +23847,20 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -18907,6 +23956,20 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti return s } +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -18924,6 +23987,8 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { type PutBucketCorsInput struct { _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + // Specifies the bucket impacted by the corsconfiguration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18989,6 +24054,20 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -19007,9 +24086,9 @@ type PutBucketEncryptionInput struct { _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon Simple Storage Service Developer Guide. 
// // Bucket is a required field @@ -19074,6 +24153,20 @@ func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *Serve return s } +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -19169,6 +24262,20 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19186,12 +24293,12 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { type PutBucketLifecycleConfigurationInput struct { _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + // The name of the bucket for which to set the configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. - // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) - // in the Amazon Simple Storage Service Developer Guide. + // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19245,6 +24352,20 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck return s } +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19265,6 +24386,7 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1000 rules. 
LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19318,6 +24440,20 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur return s } +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -19335,9 +24471,13 @@ func (s PutBucketLifecycleOutput) GoString() string { type PutBucketLoggingInput struct { _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + // The name of the bucket for which to set the logging parameters. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for logging status information. + // // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19395,6 +24535,20 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -19490,6 +24644,20 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19507,6 +24675,8 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { type PutBucketNotificationConfigurationInput struct { _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19570,6 +24740,20 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19587,9 +24771,13 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { type PutBucketNotificationInput struct { _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The container for the configuration. + // // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19642,6 +24830,20 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio return s } +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -19659,6 +24861,8 @@ func (s PutBucketNotificationOutput) GoString() string { type PutBucketPolicyInput struct { _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19726,6 +24930,20 @@ func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -19743,6 +24961,8 @@ func (s PutBucketPolicyOutput) GoString() string { type PutBucketReplicationInput struct { _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + // The name of the bucket + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19752,7 +24972,6 @@ type PutBucketReplicationInput struct { // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // A token that allows Amazon S3 object lock to be enabled for an existing bucket. 
Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -19815,6 +25034,20 @@ func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInpu return s } +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -19832,9 +25065,13 @@ func (s PutBucketReplicationOutput) GoString() string { type PutBucketRequestPaymentInput struct { _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for Payer. + // // RequestPaymentConfiguration is a required field RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19892,6 +25129,20 @@ func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request return s } +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -19909,9 +25160,13 @@ func (s PutBucketRequestPaymentOutput) GoString() string { type PutBucketTaggingInput struct { _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the TagSet and Tag elements. + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19969,6 +25224,20 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -19986,6 +25255,8 @@ func (s PutBucketTaggingOutput) GoString() string { type PutBucketVersioningInput struct { _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19993,9 +25264,7 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Describes the versioning state of an Amazon S3 bucket. 
For more information, - // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) - // in the Amazon Simple Storage Service API Reference. + // Container for setting the versioning state. // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -20055,6 +25324,20 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -20072,10 +25355,12 @@ func (s PutBucketVersioningOutput) GoString() string { type PutBucketWebsiteInput struct { _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies website configuration parameters for an Amazon S3 bucket. + // Container for the request. // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -20134,6 +25419,20 @@ func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) return s } +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -20151,12 +25450,23 @@ func (s PutBucketWebsiteOutput) GoString() string { type PutObjectAclInput struct { _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20176,13 +25486,16 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Key for which the PUT operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -20299,6 +25612,20 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -20326,44 +25653,62 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { type PutObjectInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. Body io.ReadSeeker `type:"blob"` - // Name of the bucket to which the PUT operation was initiated. + // Bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. 
+ // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required - // if object lock parameters are specified. + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -20386,34 +25731,37 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. 
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // The Legal Hold status that you want to apply to the specified object. + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode that you want to apply to this object. + // The Object Lock mode that you want to apply to this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want this object's object lock to expire. + // The date and time when you want this object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the AWS KMS Encryption Context to use for object encryption. The @@ -20421,17 +25769,24 @@ type PutObjectInput struct { // encryption context key-value pairs. 
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetrical customer managed customer master key (CMK) that was used for + // the object. + // + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the symmetric customer managed AWS KMS CMK that will be used for + // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not + // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS + // managed CMK in AWS to protect the data. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // If you don't specify, S3 Standard is the default storage class. Amazon S3 + // supports other storage classes. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -20440,7 +25795,22 @@ type PutObjectInput struct { // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). 
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -20670,10 +26040,32 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLegalHoldInput struct { _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - // The bucket containing the object that you want to place a Legal Hold on. + // The bucket name containing the object that you want to place a Legal Hold + // on. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20687,10 +26079,11 @@ type PutObjectLegalHoldInput struct { // specified object. LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object that you want to place a Legal Hold on. @@ -20766,6 +26159,20 @@ func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInpu return s } +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLegalHoldOutput struct { _ struct{} `type:"structure"` @@ -20793,21 +26200,22 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo type PutObjectLockConfigurationInput struct { _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` - // The bucket whose object lock configuration you want to create or replace. 
+ // The bucket whose Object Lock configuration you want to create or replace. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The object lock configuration that you want to apply to the specified bucket. + // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // A token to allow Amazon S3 object lock to be enabled for an existing bucket. + // A token to allow Object Lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -20868,6 +26276,20 @@ func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfi return s } +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLockConfigurationOutput struct { _ struct{} `type:"structure"` @@ -20898,8 +26320,10 @@ type PutObjectOutput struct { // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the @@ -20912,7 +26336,7 @@ type PutObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. 
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` @@ -20921,12 +26345,16 @@ type PutObjectOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. @@ -21000,13 +26428,20 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { type PutObjectRetentionInput struct { _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` - // The bucket that contains the object you want to apply this Object Retention + // The bucket name that contains the object you want to apply this Object Retention // configuration to. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions.j + // Indicates whether this operation should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The key name for the object that you want to apply this Object Retention @@ -21015,10 +26450,11 @@ type PutObjectRetentionInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The container element for the Object Retention configuration. @@ -21104,6 +26540,20 @@ func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInpu return s } +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectRetentionOutput struct { _ struct{} `type:"structure"` @@ -21131,15 +26581,29 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti type PutObjectTaggingInput struct { _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Name of the tag. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Container for the TagSet and Tag elements + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The versionId of the object that the tag-set will be added to. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21214,9 +26678,24 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { return s } +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` + // The versionId of the object the tag-set was added to. 
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -21303,6 +26782,20 @@ func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicA return s } +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -21322,6 +26815,8 @@ func (s PutPublicAccessBlockOutput) GoString() string { type QueueConfiguration struct { _ struct{} `type:"structure"` + // A collection of bucket events for which to send notifications + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` @@ -21391,6 +26886,10 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } +// This data type is deprecated. Use QueueConfiguration for the same purposes. +// This data type specifies the configuration for publishing messages to an +// Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified +// events. type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -21399,12 +26898,15 @@ type QueueConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of bucket events for which to send notifications Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. Queue *string `type:"string"` } @@ -21442,6 +26944,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } +// The container for the records event. type RecordsEvent struct { _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` @@ -21481,6 +26984,13 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.Payload + return msg, err +} + // Specifies how requests are redirected. In the event of an error, you can // specify a different error code to return. type Redirect struct { @@ -21608,7 +27118,7 @@ type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the AWS Identity and Access Management // (IAM) role that Amazon S3 assumes when replicating objects. For more information, - // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) // in the Amazon Simple Storage Service Developer Guide. 
// // Role is a required field @@ -21673,14 +27183,30 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo type ReplicationRule struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 should replicate delete makers. + // Specifies whether Amazon S3 replicates the delete markers. If you specify + // a Filter, you must specify this element. However, in the latest version of + // replication configuration (when Filter is specified), Amazon S3 doesn't replicate + // delete markers. Therefore, the DeleteMarkerReplication element can contain + // only Disabled. For an example configuration, see Basic Rule + // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, Amazon + // S3 handled replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` - // A container for information about the replication destination. + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). // // Destination is a required field Destination *Destination `type:"structure" required:"true"` + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 Developer Guide. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. Filter *ReplicationRuleFilter `type:"structure"` @@ -21688,9 +27214,9 @@ type ReplicationRule struct { // A unique identifier for the rule. The maximum value is 255 characters. ID *string `type:"string"` - // An object keyname prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -21700,21 +27226,21 @@ type ReplicationRule struct { // when filtering. If two or more rules identify the same object based on a // specified filter, the rule with higher priority takes precedence. 
For example: // - // * Same object quality prefix based filter criteria If prefixes you specified + // * Same object quality prefix-based filter criteria if prefixes you specified // in multiple rules overlap // - // * Same object qualify tag based filter criteria specified in multiple + // * Same object qualify tag-based filter criteria specified in multiple // rules // - // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) - // in the Amazon S3 Developer Guide. + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon Simple Storage Service Developer Guide. Priority *int64 `type:"integer"` // A container that describes additional filters for identifying the source // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using - // an AWS KMS-Managed Key (SSE-KMS). + // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` // Specifies whether the rule is enabled. @@ -21747,6 +27273,11 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } if s.Filter != nil { if err := s.Filter.Validate(); err != nil { invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) @@ -21776,6 +27307,12 @@ func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { return s } +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + // SetFilter sets the Filter field's value. func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { s.Filter = v @@ -21812,11 +27349,25 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { return s } +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag type ReplicationRuleAndOperator struct { _ struct{} `type:"structure"` + // An object key name prefix that identifies the subset of objects to which + // the rule applies. Prefix *string `type:"string"` + // An array of tags containing key and value pairs. Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } @@ -21878,8 +27429,8 @@ type ReplicationRuleFilter struct { // in an And tag. And *ReplicationRuleAndOperator `type:"structure"` - // An object keyname prefix that identifies the subset of objects to which the - // rule applies. + // An object key name prefix that identifies the subset of objects to which + // the rule applies. Prefix *string `type:"string"` // A container for specifying a tag key and value. 
@@ -21936,6 +27487,91 @@ func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { return s } +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid values: 15 minutes. + Minutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -21974,6 +27610,7 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur return s } +// Container for specifying if periodic QueryProgress messages should be sent. type RequestProgress struct { _ struct{} `type:"structure"` @@ -22001,21 +27638,34 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { type RestoreObjectInput struct { _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + // The bucket name or containing the object to restore. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // VersionId used to reference a specific version of the object. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -22093,6 +27743,20 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { return s } +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -22138,7 +27802,7 @@ type RestoreRequest struct { // The optional description for the job. Description *string `type:"string"` - // Glacier related parameters pertaining to this job. Do not use with restores + // S3 Glacier related parameters pertaining to this job. Do not use with restores // that specify OutputLocation. GlacierJobParameters *GlacierJobParameters `type:"structure"` @@ -22148,7 +27812,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -22286,8 +27950,9 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { } // Specifies lifecycle rules for an Amazon S3 bucket. For more information, -// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) -// in the Amazon Simple Storage Service API Reference. +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. 
For examples, see Put +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) type Rule struct { _ struct{} `type:"structure"` @@ -22298,6 +27963,7 @@ type Rule struct { // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object. Expiration *LifecycleExpiration `type:"structure"` // Unique identifier for the rule. The value can't be longer than 255 characters. @@ -22331,7 +27997,10 @@ type Rule struct { // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - // Specifies when an object transitions to a specified storage class. + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon Simple Storage Service Developer Guide. Transition *Transition `type:"structure"` } @@ -22409,12 +28078,12 @@ func (s *Rule) SetTransition(v *Transition) *Rule { return s } -// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the AWS Key Management Service (KMS) master encryption - // key to use for encrypting Inventory reports. + // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer + // managed customer master key (CMK) to use for encrypting inventory reports. // // KeyId is a required field KeyId *string `type:"string" required:"true" sensitive:"true"` @@ -22449,7 +28118,7 @@ func (s *SSEKMS) SetKeyId(v string) *SSEKMS { return s } -// Specifies the use of SSE-S3 to encrypt delivered Inventory reports. +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. type SSES3 struct { _ struct{} `locationName:"SSE-S3" type:"structure"` } @@ -22464,75 +28133,51 @@ func (s SSES3) GoString() string { return s.String() } -// SelectObjectContentEventStream provides handling of EventStreams for -// the SelectObjectContent API. -// -// Use this type to receive SelectObjectContentEventStream events. The events -// can be read from the Events channel member. -// -// The events that can be received are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -type SelectObjectContentEventStream struct { - // Reader is the EventStream reader for the SelectObjectContentEventStream - // events. This value is automatically set by the SDK when the API call is made - // Use this member when unit testing your code with the SDK to mock out the - // EventStream Reader. - // - // Must not be nil. - Reader SelectObjectContentEventStreamReader +// Specifies the byte range of the object to get the records from. A record +// is processed when its first byte is contained by the range. This parameter +// is optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + _ struct{} `type:"structure"` - // StreamCloser is the io.Closer for the EventStream connection. 
For HTTP - // EventStream this is the response Body. The stream will be closed when - // the Close method of the EventStream is called. - StreamCloser io.Closer + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. If only the End parameter is supplied, it is interpreted + // to mean scan the last N bytes of the file. For example, 50 + // means scan the last 50 bytes. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is supplied, + // it means scan from that point to the end of the file.For example; 50 + // means scan from byte 50 until the end of the file. + Start *int64 `type:"long"` } -// Close closes the EventStream. This will also cause the Events channel to be -// closed. You can use the closing of the Events channel to terminate your -// application's read from the API's EventStream. -// -// Will close the underlying EventStream reader. For EventStream over HTTP -// connection this will also close the HTTP connection. -// -// Close must be called when done using the EventStream API. Not calling Close -// may result in resource leaks. -func (es *SelectObjectContentEventStream) Close() (err error) { - es.Reader.Close() - return es.Err() +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) } -// Err returns any error that occurred while reading EventStream Events from -// the service API's response. Returns nil if there were no errors. -func (es *SelectObjectContentEventStream) Err() error { - if err := es.Reader.Err(); err != nil { - return err - } - es.StreamCloser.Close() +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} - return nil +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s } -// Events returns a channel to read EventStream Events from the -// SelectObjectContent API. -// -// These events are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { - return es.Reader.Events() +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s } // SelectObjectContentEventStreamEvent groups together all EventStream -// events read from the SelectObjectContent API. +// events writes for SelectObjectContentEventStream. // // These events are: // @@ -22543,11 +28188,12 @@ func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEve // * StatsEvent type SelectObjectContentEventStreamEvent interface { eventSelectObjectContentEventStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler } -// SelectObjectContentEventStreamReader provides the interface for reading EventStream -// Events from the SelectObjectContent API. The -// default implementation for this interface will be SelectObjectContentEventStream. +// SelectObjectContentEventStreamReader provides the interface for reading to the stream. The +// default implementation for this interface will be SelectObjectContentEventStreamData. // // The reader's Close method must allow multiple concurrent calls. 
// @@ -22562,8 +28208,7 @@ type SelectObjectContentEventStreamReader interface { // Returns a channel of events as they are read from the event stream. Events() <-chan SelectObjectContentEventStreamEvent - // Close will close the underlying event stream reader. For event stream over - // HTTP this will also close the HTTP connection. + // Close will stop the reader reading events from the stream. Close() error // Returns any error that has occurred while reading from the event stream. @@ -22573,57 +28218,44 @@ type SelectObjectContentEventStreamReader interface { type readSelectObjectContentEventStream struct { eventReader *eventstreamapi.EventReader stream chan SelectObjectContentEventStreamEvent - errVal atomic.Value + err *eventstreamapi.OnceError done chan struct{} closeOnce sync.Once } -func newReadSelectObjectContentEventStream( - reader io.ReadCloser, - unmarshalers request.HandlerList, - logger aws.Logger, - logLevel aws.LogLevelType, -) *readSelectObjectContentEventStream { +func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream { r := &readSelectObjectContentEventStream{ - stream: make(chan SelectObjectContentEventStreamEvent), - done: make(chan struct{}), + eventReader: eventReader, + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), } - - r.eventReader = eventstreamapi.NewEventReader( - reader, - protocol.HandlerPayloadUnmarshal{ - Unmarshalers: unmarshalers, - }, - r.unmarshalerForEventType, - ) - r.eventReader.UseLogger(logger, logLevel) + go r.readEventStream() return r } -// Close will close the underlying event stream reader. For EventStream over -// HTTP this will also close the HTTP connection. +// Close will close the underlying event stream reader. 
func (r *readSelectObjectContentEventStream) Close() error { r.closeOnce.Do(r.safeClose) - return r.Err() } +func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} { + return r.done +} + func (r *readSelectObjectContentEventStream) safeClose() { close(r.done) - err := r.eventReader.Close() - if err != nil { - r.errVal.Store(err) - } } func (r *readSelectObjectContentEventStream) Err() error { - if v := r.errVal.Load(); v != nil { - return v.(error) - } - - return nil + return r.err.Err() } func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { @@ -22631,6 +28263,7 @@ func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContent } func (r *readSelectObjectContentEventStream) readEventStream() { + defer r.Close() defer close(r.stream) for { @@ -22645,7 +28278,7 @@ func (r *readSelectObjectContentEventStream) readEventStream() { return default: } - r.errVal.Store(err) + r.err.SetError(err) return } @@ -22657,22 +28290,20 @@ func (r *readSelectObjectContentEventStream) readEventStream() { } } -func (r *readSelectObjectContentEventStream) unmarshalerForEventType( - eventType string, -) (eventstreamapi.Unmarshaler, error) { +type unmarshalerForSelectObjectContentEventStreamEvent struct { + metadata protocol.ResponseMetadata +} + +func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { switch eventType { case "Cont": return &ContinuationEvent{}, nil - case "End": return &EndEvent{}, nil - case "Progress": return &ProgressEvent{}, nil - case "Records": return &RecordsEvent{}, nil - case "Stats": return &StatsEvent{}, nil default: @@ -22704,7 +28335,7 @@ type SelectObjectContentInput struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (for example., SQL). + // The type of the provided expression (for example, SQL). // // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -22738,6 +28369,24 @@ type SelectObjectContentInput struct { // The SSE Customer Key MD5. For more information, see Server-Side Encryption // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRangemay be used in the following ways: + // + // * 50100 - process only + // the records starting between the bytes 50 and 100 (inclusive, counting + // from zero) + // + // * 50 - process only the records + // starting after the byte 50 + // + // * 50 - process only the records within + // the last 50 bytes of the file. + ScanRange *ScanRange `type:"structure"` } // String returns the string representation @@ -22858,11 +28507,30 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC return s } +// SetScanRange sets the ScanRange field's value. 
+func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` - // Use EventStream to use the API's stream. - EventStream *SelectObjectContentEventStream `type:"structure"` + EventStream *SelectObjectContentEventStream } // String returns the string representation @@ -22875,29 +28543,17 @@ func (s SelectObjectContentOutput) GoString() string { return s.String() } -// SetEventStream sets the EventStream field's value. func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { s.EventStream = v return s } +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} -func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { - if r.Error != nil { - return - } - reader := newReadSelectObjectContentEventStream( - r.HTTPResponse.Body, - r.Handlers.UnmarshalStream, - r.Config.Logger, - r.Config.LogLevel.Value(), - ) - go reader.readEventStream() - - eventStream := &SelectObjectContentEventStream{ - StreamCloser: r.HTTPResponse.Body, - Reader: reader, - } - s.EventStream = eventStream +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream } // Describes the parameters for Select job types. @@ -22909,7 +28565,7 @@ type SelectParameters struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (e.g., SQL). + // The type of the provided expression (for example, SQL). // // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -22989,8 +28645,24 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // KMS master key ID to use for the default encryption. This parameter is allowed - // if and only if SSEAlgorithm is set to aws:kms. + // AWS Key Management Service (KMS) customer master key ID to use for the default + // encryption. This parameter is allowed if and only if SSEAlgorithm is set + // to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // However, if you are using encryption with cross-account operations, you must + // use a fully qualified CMK ARN. For more information, see Using encryption + // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more + // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. 
KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. @@ -23129,7 +28801,7 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using -// an AWS KMS-Managed Key (SSE-KMS). +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` @@ -23176,7 +28848,7 @@ type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using an AWS KMS-managed key. + // using a customer master key (CMK) stored in AWS Key Management Service. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -23211,6 +28883,7 @@ func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { return s } +// Container for the stats details. type Stats struct { _ struct{} `type:"structure"` @@ -23252,6 +28925,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats { return s } +// Container for the Stats Event. type StatsEvent struct { _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` @@ -23292,6 +28966,16 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + // Specifies data related to access patterns to be collected and made available // to analyze the tradeoffs between different storage classes for an Amazon // S3 bucket. @@ -23334,6 +29018,8 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) return s } +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -23391,6 +29077,7 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora return s } +// A container of a key value name pair. type Tag struct { _ struct{} `type:"structure"` @@ -23446,9 +29133,12 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// Container for TagSet elements. type Tagging struct { _ struct{} `type:"structure"` + // A collection for a set of tags + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -23492,9 +29182,11 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { return s } +// Container for granting information. type TargetGrant struct { _ struct{} `type:"structure"` + // Container for the person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. 
@@ -23617,6 +29309,10 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// instead. type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -23625,6 +29321,7 @@ type TopicConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of events related to objects Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. @@ -23670,7 +29367,10 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Specifies when an object transitions to a specified storage class. +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. type Transition struct { _ struct{} `type:"structure"` @@ -23717,6 +29417,8 @@ func (s *Transition) SetStorageClass(v string) *Transition { type UploadPartCopyInput struct { _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23742,11 +29444,12 @@ type UploadPartCopyInput struct { // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte // offsets to copy. For example, bytes=0-9 indicates that you want to copy the - // first ten bytes of the source. You can copy a range only if the source object + // first 10 bytes of the source. You can copy a range only if the source object // is greater than 5 MB. CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt @@ -23755,10 +29458,12 @@ type UploadPartCopyInput struct { CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // Object key for which the multipart upload was initiated. 
+ // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -23768,26 +29473,28 @@ type UploadPartCopyInput struct { // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. @@ -23960,9 +29667,24 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` + // Container for all response elements. 
CopyPartResult *CopyPartResult `type:"structure"` // The version of the source object that was copied, if you have enabled versioning @@ -23979,16 +29701,17 @@ type UploadPartCopyOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -24060,7 +29783,7 @@ type UploadPartInput struct { ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required + // auto-populated when using the command from the CLI. This parameter is required // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` @@ -24075,26 +29798,28 @@ type UploadPartInput struct { // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. 
This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being uploaded. @@ -24221,6 +29946,20 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -24237,16 +29976,16 @@ type UploadPartOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). 
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -24561,11 +30300,37 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + // EventS3ObjectRestorePost is a Event enum value EventS3ObjectRestorePost = "s3:ObjectRestore:Post" // EventS3ObjectRestoreCompleted is a Event enum value EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" +) + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" ) const ( @@ -24657,6 +30422,9 @@ const ( // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" ) const ( @@ -24691,6 +30459,14 @@ const ( MetadataDirectiveReplace = "REPLACE" ) +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + const ( // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" @@ -24839,6 +30615,14 @@ const ( ReplicationStatusReplica = "REPLICA" ) +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + // If present, indicates that the requester was successfully charged for the // request. const ( @@ -24846,10 +30630,11 @@ const ( RequestChargedRequester = "requester" ) -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. -// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. 
For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go index 5c8ce5cc8..407f06b6e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -25,30 +24,6 @@ const ( appendMD5TxEncoding = "append-md5" ) -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - if !aws.IsReaderSeekable(r.Body) { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "Unable to compute Content-MD5 for unseekable body, S3.%s", - r.Operation.Name)) - } - return - } - - if _, err := copySeekableBody(h, r.Body); err != nil { - r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - v := base64.StdEncoding.EncodeToString(h.Sum(nil)) - r.HTTPRequest.Header.Set(contentMD5Header, v) -} - // computeBodyHashes will add Content MD5 and Content Sha256 hashes to the // request. If the body is not seekable or S3DisableContentMD5Validation set // this handler will be ignored. @@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) { dst = io.MultiWriter(hashers...) } - if _, err := copySeekableBody(dst, r.Body); err != nil { + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) return } @@ -119,28 +94,6 @@ const ( sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen ) -func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} - // Adds the x-amz-te: append_md5 header to the request. This requests the service // responds with a trailing MD5 checksum. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 23d386b16..a7698d5eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/s3err" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) func init() { @@ -13,7 +14,7 @@ func init() { func defaultInitClientFn(c *client.Client) { // Support building custom endpoints based on config - c.Handlers.Build.PushFront(updateEndpointForS3Config) + c.Handlers.Build.PushFront(endpointHandler) // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) @@ -27,17 +28,11 @@ func defaultInitClientFn(c *client.Client) { } func defaultInitRequestFn(r *request.Request) { - // Add reuest handlers for specific platforms. + // Add request handlers for specific platforms. // e.g. 100-continue support for PUT requests using Go 1.6 platformRequestHandlers(r) switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, - opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, - opPutBucketReplication: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) case opGetBucketLocation: // GetBucketLocation has custom parsing logic r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) @@ -73,3 +68,8 @@ type sseCustomerKeyGetter interface { type copySourceSSECustomerKeyGetter interface { getCopySourceSSECustomerKey() string } + +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go new file mode 100644 index 000000000..c4048fbfb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -0,0 +1,233 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +// Used by shapes with members decorated as endpoint ARN. 
+func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + return arn.ParseAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = newInvalidARNError(nil, err) + return + } + + resReq := resourceRequest{ + Resource: resource, + Request: req, + } + + if resReq.IsCrossPartition() { + req.Error = newClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = newClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if resReq.HasCustomEndpoint() { + req.Error = newInvalidARNWithCustomEndpointError(resource, nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = newInvalidARNError(resource, nil) + } +} + +type resourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +func (r resourceRequest) ARN() awsarn.ARN { + return r.Resource.GetARN() +} + +func (r resourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +func (r resourceRequest) UseFIPS() bool { + return isFIPS(aws.StringValue(r.Request.Config.Region)) +} + +func (r resourceRequest) IsCrossPartition() bool { + return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition +} + +func (r resourceRequest) IsCrossRegion() bool { + return isCrossRegion(r.Request, r.Resource.GetARN().Region) +} + +func (r resourceRequest) HasCustomEndpoint() bool { + return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 +} + +func isFIPS(clientRegion string) bool { + return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") +} +func isCrossRegion(req *request.Request, otherRegion string) bool { + return req.ClientInfo.SigningRegion != otherRegion +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return newClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. 
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +type accessPointEndpointBuilder arn.AccessPointARN + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." +) + +func (a accessPointEndpointBuilder) Build(req *request.Request) error { + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if isFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. + return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion) + if err != nil { + return newFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + const serviceEndpointLabel = "s3-accesspoint" + + // dualstack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } + + protocol.HostPrefixBuilder{ + Prefix: accesPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + req.ClientInfo.SigningName = endpoint.SigningName + req.ClientInfo.SigningRegion = endpoint.SigningRegion + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return newInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go new file mode 100644 index 000000000..9df03e78d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go @@ -0,0 +1,151 @@ +package s3 + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +type invalidARNError struct { + message string + resource arn.Resource + origErr error +} + +func (e invalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e invalidARNError) Code() string { + return invalidARNErrorErrCode +} + +func (e invalidARNError) Message() string { + return e.message +} + +func (e invalidARNError) OrigErr() error { + return e.origErr +} + +func newInvalidARNError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// ARN not supported for the target partition +func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +type configurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +func (e configurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e configurationError) Code() string { + return configurationErrorErrCode +} + +func (e configurationError) Message() string { + return e.message +} + +func (e configurationError) OrigErr() error { + return e.origErr +} + +func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) 
configurationError { + return configurationError{ + message: "client configured for S3 Accelerate but is supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index 931cb17bb..49aeff16f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -13,6 +13,12 @@ const ( // ErrCodeBucketAlreadyOwnedByYou for service response error code // "BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia Region. + // For legacy compatibility, if you re-create an existing bucket that you already + // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the + // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" // ErrCodeNoSuchBucket for service response error code @@ -36,13 +42,13 @@ const ( // ErrCodeObjectAlreadyInActiveTierError for service response error code // "ObjectAlreadyInActiveTierError". // - // This operation is not allowed against this storage tier + // This operation is not allowed against this storage tier. ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon Glacier. + // only stored in Amazon S3 Glacier. ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index a7fbc2de2..81cdec1ae 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{ opListBuckets, opCreateBucket, opDeleteBucket, } -// Request handler to automatically add the bucket name to the endpoint domain +// Automatically add the bucket name to the endpoint domain // if possible. This style of bucket is valid for all bucket names which are // DNS compatible and do not contain "." 
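// Editor's note: illustrative sketch, not part of the vendored upstream file. The
// invalidARNError and configurationError values defined above implement awserr.Error,
// so callers can branch on their codes; the helper below is a hypothetical example of
// doing so after an S3 call that was given a resource ARN as its bucket parameter.
func exampleClassifyARNError(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case invalidARNErrorErrCode: // "InvalidARNError"
			return "malformed or unsupported resource ARN"
		case configurationErrorErrCode: // "ConfigurationError"
			return "client region, partition, or endpoint configuration conflicts with the ARN"
		}
	}
	return "unrelated error"
}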
-func updateEndpointForS3Config(r *request.Request) { +func updateEndpointForS3Config(r *request.Request, bucketName string) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) @@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) { r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") } } - updateEndpointForAccelerate(r) + updateEndpointForAccelerate(r, bucketName) } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r) + updateEndpointForHostStyle(r, bucketName) } } -func updateEndpointForHostStyle(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { // bucket name must be valid to put into the host return } - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } var ( accelElem = []byte("s3-accelerate.dualstack.") ) -func updateEndpointForAccelerate(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForAccelerate(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), nil) return } @@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) { r.HTTPRequest.URL.Host = strings.Join(parts, ".") - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } // Attempts to retrieve the bucket name from the request input parameters. @@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool { // moveBucketToHost moves the bucket name from the URI path to URL host. func moveBucketToHost(u *url.URL, bucket string) { u.Host = bucket + "." + u.Host - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" - } + removeBucketFromPath(u) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go new file mode 100644 index 000000000..2f93f96fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go @@ -0,0 +1,45 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. 
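// Editor's note: illustrative sketch, not part of the vendored upstream file. It
// exercises ParseAccessPointResource (declared next) together with SplitResource from
// arn.go: an S3 access point ARN yields an AccessPointARN carrying the access point
// name. The ARN string is hypothetical; "arn" is the aws/arn package imported above.
func exampleParseAccessPoint() (AccessPointARN, error) {
	a, err := arn.Parse("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point")
	if err != nil {
		return AccessPointARN{}, err
	}
	parts := SplitResource(a.Resource) // ["accesspoint", "my-access-point"]
	return ParseAccessPointResource(a, parts[1:])
}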
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if len(a.Region) == 0 { + return AccessPointARN{}, InvalidARNError{a, "region not set"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{a, "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go new file mode 100644 index 000000000..a942d887f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go @@ -0,0 +1,71 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. +func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{a, "partition not set"} + } + if a.Service != "s3" { + return nil, InvalidARNError{a, "service is not S3"} + } + if len(a.Resource) == 0 { + return nil, InvalidARNError{a, "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +func (e InvalidARNError) Error() string { + return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index d17dcc9da..b4c07b4d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "s3" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "S3" // ServiceID is a unique identifer of a specific service. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the S3 client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a S3 client from just a session. 
// svc := s3.New(mySession) // @@ -46,11 +48,11 @@ const ( // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, @@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2006-03-01", }, @@ -75,6 +78,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) // Run custom client initialization if present diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index f6a69aed1..247770e4c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -2,6 +2,7 @@ package s3 import ( "bytes" + "io" "io/ioutil" "net/http" @@ -24,17 +25,18 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { r.HTTPResponse.Body = ioutil.NopCloser(body) defer body.Seek(0, sdkio.SeekStart) - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. - return - } - unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == request.ErrCodeSerialization { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { r.Error = nil return } - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 5b63fac72..6eecf6691 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -1,6 +1,7 @@ package s3 import ( + "bytes" "encoding/xml" "fmt" "io" @@ -45,17 +46,24 @@ func unmarshalError(r *request.Request) { // Attempt to parse error from body if it is known var errResp xmlErrorResponse - err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) - if err == io.EOF { - // Only capture the error if an unmarshal error occurs that is not EOF, - // because S3 might send an error without a error message which causes - // the XML unmarshal to fail with EOF. 
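// Editor's note: illustrative sketch, not part of the vendored upstream file. It
// demonstrates the point of s3unmarshalXMLError (added below in unmarshal_error.go):
// unlike xmlutil.UnmarshalXMLError it does not swallow io.EOF, so an empty 200 OK
// payload can be told apart from a real XML error document. Assumes "io" and
// "strings" are available as imports where this runs.
func exampleEmptyPayloadSurfacesEOF() bool {
	var errResp xmlErrorResponse
	err := s3unmarshalXMLError(&errResp, strings.NewReader(""))
	return err == io.EOF // true: the caller sees the empty payload explicitly
}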
- err = nil + var err error + if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { + err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) + } else { + err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) } + if err != nil { + var errorMsg string + if err == io.EOF { + errorMsg = "empty response payload" + } else { + errorMsg = "failed to unmarshal error message" + } + r.Error = awserr.NewRequestFailure( awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), + errorMsg, err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -86,3 +94,21 @@ type RequestFailure interface { // Host ID is the S3 Host ID needed for debug, and contacting support HostID() string } + +// s3unmarshalXMLError is s3 specific xml error unmarshaler +// for 200 OK errors and response payloads. +// This function differs from the xmlUtil.UnmarshalXMLError +// func. It does not ignore the EOF error and passes it up. +// Related to bug fix for `s3 200 OK response with empty payload` +func s3unmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index eb0a6a417..550b5f687 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -78,6 +78,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRole last // for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. You can provide a value from 900 @@ -91,6 +93,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: You cannot call // the AWS STS GetFederationToken or GetSessionToken API operations. @@ -99,7 +103,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent @@ -131,6 +135,24 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. 
// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// // Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information @@ -165,9 +187,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being @@ -256,6 +287,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. Your role session lasts for the @@ -271,6 +304,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot // call the STS GetFederationToken or GetSessionToken API operations. 
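// Editor's note: illustrative usage sketch, not part of the vendored upstream file; it
// belongs in application code and assumes imports of "github.com/aws/aws-sdk-go/aws"
// and "github.com/aws/aws-sdk-go/service/sts". It exercises the session tags described
// above: Tags attaches key-value pairs to the assumed-role session, and
// TransitiveTagKeys marks which of them survive role chaining. The role ARN and tag
// values are hypothetical.
func exampleAssumeRoleWithSessionTags(svc *sts.STS) (*sts.AssumeRoleOutput, error) {
	return svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(3600),
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("engineering")},
		},
		TransitiveTagKeys: []*string{aws.String("Department")},
	})
}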
@@ -279,7 +314,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent @@ -289,12 +324,6 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document // that is uploaded for the SAML provider entity for your identity provider. @@ -302,8 +331,50 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail // logs. The entry includes the value in the NameID element of the SAML assertion. // We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the Persistent -// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. 
When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. // // For more information, see the following resources: // @@ -332,9 +403,18 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -456,6 +536,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service API operations. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. You can provide a value from 900 @@ -469,6 +551,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. 
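// Editor's note: illustrative usage sketch, not part of the vendored upstream file; it
// belongs in application code and assumes the "aws" and "sts" imports noted above. It
// shows the AssumeRoleWithSAML flow documented earlier: no long-term AWS credentials
// are required, only the base64-encoded SAML assertion from the IdP. The role and
// provider ARNs are hypothetical.
func exampleAssumeRoleWithSAML(svc *sts.STS, samlAssertion string) (*sts.AssumeRoleWithSAMLOutput, error) {
	return svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
		SAMLAssertion: aws.String(samlAssertion),
	})
}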
// +// Permissions +// // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: // you cannot call the STS GetFederationToken or GetSessionToken API operations. @@ -477,7 +561,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent @@ -487,6 +571,42 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that // the application can assume. 
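// Editor's note: illustrative usage sketch, not part of the vendored upstream file; it
// belongs in application code and assumes the "aws" and "sts" imports noted above. It
// shows the AssumeRoleWithWebIdentity call described here: the caller supplies the OIDC
// token issued by the identity provider rather than AWS credentials. Token and ARN
// values are hypothetical.
func exampleAssumeRoleWithWebIdentity(svc *sts.STS, oidcToken string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	return svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"),
		RoleSessionName:  aws.String("example-web-session"),
		WebIdentityToken: aws.String(oidcToken),
		DurationSeconds:  aws.Int64(3600),
	})
}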
The role that your application assumes must trust @@ -514,8 +634,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and // AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). // These toolkits contain sample apps that show how to invoke the identity -// providers, and then how to use the information from these providers to -// get and use temporary security credentials. +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // // * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of @@ -535,9 +655,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -547,11 +676,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // can also mean that the claim has expired or has been explicitly revoked. // // * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the non-AWS identity provider -// (IDP) that was asked to verify the incoming identity token could not be reached. -// This is often a transient error caused by network conditions. Retry the request +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the -// error persists, the non-AWS identity provider might be down or not responding. +// error persists, the identity provider might be down or not responding. // // * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. 
Get @@ -763,7 +892,8 @@ func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *reques // pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) // to learn which IAM user owns the keys. To learn who requested the temporary // credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. // // This operation does not indicate the state of the access key. The key might // be active, inactive, or deleted. Active keys might not have permissions to @@ -850,7 +980,8 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // sts:GetCallerIdentity action, you can still perform this operation. Permissions // are not required because the same information is returned when an IAM user // or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa). +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -942,7 +1073,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // or an OpenID Connect-compatible identity provider. In this case, we recommend // that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. // For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. // // You can also call GetFederationToken using the security credentials of an // AWS account root user, but we do not recommend it. Instead, we recommend @@ -952,41 +1084,67 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // +// Session duration +// // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// is 43,200 seconds (12 hours). Temporary credentials that are obtained by -// using AWS account root user credentials have a maximum duration of 3,600 -// seconds (1 hour). +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). // -// The temporary security credentials created by GetFederationToken can be used -// to make API calls to any AWS service with the following exceptions: +// Permissions // -// * You cannot use these credentials to call any IAM API operations. 
+// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: // -// * You cannot call any STS API operations except GetCallerIdentity. +// * You cannot call any IAM operations using the AWS CLI or the AWS API. // -// Permissions +// * You cannot call any STS operations except GetCallerIdentity. // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. +// and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. The only exception -// is when the credentials are used to access a resource that has a resource-based -// policy that specifically references the federated user session in the Principal -// element of the policy. When you pass session policies, the session permissions -// are the intersection of the IAM user policies and the session policies that -// you pass. This gives you a way to further restrict the permissions for a -// federated user. You cannot use session policies to grant more permissions -// than those that are defined in the permissions policy of the IAM user. For -// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. For information about using GetFederationToken to // create temporary security credentials, see GetFederationToken—Federation // Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). // +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. 
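// Editor's note: illustrative usage sketch, not part of the vendored upstream file; it
// belongs in application code and assumes the "aws" and "sts" imports noted above, plus
// the Tags field that GetFederationTokenInput gains alongside this documentation. It
// shows the pattern described here: an inline session policy plus session tags scoping
// the federated user's permissions. The federated user name, policy JSON, and tag
// values are hypothetical.
func exampleGetFederationToken(svc *sts.STS) (*sts.GetFederationTokenOutput, error) {
	return svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		DurationSeconds: aws.Int64(3600),
		Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("engineering")},
		},
	})
}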
For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1000,9 +1158,18 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being @@ -1091,6 +1258,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // +// Session Duration +// // The GetSessionToken operation must be called by using the long-term AWS security // credentials of the AWS account root user or an IAM user. Credentials that // are created by IAM users are valid for the duration that you specify. This @@ -1099,6 +1268,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // based on account credentials can range from 900 seconds (15 minutes) up to // 3,600 seconds (1 hour), with a default of 1 hour. // +// Permissions +// // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: // @@ -1213,16 +1384,16 @@ type AssumeRoleInput struct { // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. 
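// Editor's note: illustrative usage sketch, not part of the vendored upstream file; it
// belongs in application code and assumes the "aws" and "sts" imports noted above. It
// shows the GetSessionToken call documented here, using the long-term credentials of an
// IAM user together with an MFA code. The MFA device ARN is hypothetical.
func exampleGetSessionTokenWithMFA(svc *sts.STS, mfaCode string) (*sts.GetSessionTokenOutput, error) {
	return svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:       aws.String(mfaCode),
	})
}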
The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1231,15 +1402,15 @@ type AssumeRoleInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, + // policies can't exceed 2,048 characters. For more information about ARNs, // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1284,6 +1455,41 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value @@ -1292,6 +1498,19 @@ type AssumeRoleInput struct { // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` } // String returns the string representation @@ -1344,6 +1563,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1393,12 +1622,24 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { return s } +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + // SetTokenCode sets the TokenCode field's value. 
func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { s.TokenCode = &v return s } +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. type AssumeRoleOutput struct { @@ -1418,9 +1659,10 @@ type AssumeRoleOutput struct { // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -1491,16 +1733,16 @@ type AssumeRoleWithSAMLInput struct { // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1509,15 +1751,15 @@ type AssumeRoleWithSAMLInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, + // policies can't exceed 2,048 characters. For more information about ARNs, // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. 
+ // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1546,7 +1788,7 @@ type AssumeRoleWithSAMLInput struct { // in the IAM User Guide. // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -1673,9 +1915,10 @@ type AssumeRoleWithSAMLOutput struct { // ) ) NameQualifier *string `type:"string"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The value of the NameID element in the Subject element of the SAML assertion. @@ -1786,16 +2029,16 @@ type AssumeRoleWithWebIdentityInput struct { // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1804,15 +2047,15 @@ type AssumeRoleWithWebIdentityInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, + // policies can't exceed 2,048 characters. 
For more information about ARNs, // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1857,7 +2100,7 @@ type AssumeRoleWithWebIdentityInput struct { // the application makes an AssumeRoleWithWebIdentity call. // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -1983,9 +2226,10 @@ type AssumeRoleWithWebIdentityOutput struct { // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The issuing authority of the web identity token presented. For OpenID Connect @@ -2057,7 +2301,7 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2225,7 +2469,7 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the credentials. // For more information about ARNs and how to use them in policies, see IAM // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2265,7 +2509,7 @@ type GetAccessKeyInfoInput struct { // The identifier of an access key. // // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercased letter or digit. + // that can consist of any upper- or lowercase letter or digit. 
// // AccessKeyId is a required field AccessKeyId *string `min:"16" type:"string" required:"true"` @@ -2418,10 +2662,7 @@ type GetFederationTokenInput struct { // use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. The only exception - // is when the credentials are used to access a resource that has a resource-based - // policy that specifically references the federated user session in the Principal - // element of the policy. + // then the resulting federated user session has no permissions. // // When you pass session policies, the session permissions are the intersection // of the IAM user policies and the session policies that you pass. This gives @@ -2431,17 +2672,23 @@ type GetFederationTokenInput struct { // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2452,16 +2699,13 @@ type GetFederationTokenInput struct { // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline - // and managed session policies shouldn't exceed 2048 characters. You can provide + // and managed session policies can't exceed 2,048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. 
The only exception - // is when the credentials are used to access a resource that has a resource-based - // policy that specifically references the federated user session in the Principal - // element of the policy. + // then the resulting federated user session has no permissions. // // When you pass session policies, the session permissions are the intersection // of the IAM user policies and the session policies that you pass. This gives @@ -2471,12 +2715,46 @@ type GetFederationTokenInput struct { // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. 
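+	//
+	// A rough usage sketch only (the federated user name and tag values below
+	// are placeholders, not part of the API definition):
+	//
+	//    input := &sts.GetFederationTokenInput{
+	//        Name: aws.String("example-federated-user"),
+	//        Tags: []*sts.Tag{
+	//            {Key: aws.String("Project"), Value: aws.String("analytics")},
+	//        },
+	//    }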
+ Tags []*Tag `type:"list"` } // String returns the string representation @@ -2514,6 +2792,16 @@ func (s *GetFederationTokenInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2545,6 +2833,12 @@ func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetF return s } +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -2563,9 +2857,10 @@ type GetFederationTokenOutput struct { // an Amazon S3 bucket policy. FederatedUser *FederatedUser `type:"structure"` - // A percentage value indicating the size of the policy in packed form. The - // service rejects policies for which the packed size is greater than 100 percent - // of the allowed value. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -2748,3 +3043,73 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { s.Arn = &v return s } + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c35..a233f542e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -14,11 +14,11 @@ const ( // ErrCodeIDPCommunicationErrorException for service response error code // "IDPCommunicationError". // - // The request could not be fulfilled because the non-AWS identity provider - // (IDP) that was asked to verify the incoming identity token could not be reached. - // This is often a transient error caused by network conditions. Retry the request + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the - // error persists, the non-AWS identity provider might be down or not responding. + // error persists, the identity provider might be down or not responding. ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" // ErrCodeIDPRejectedClaimException for service response error code @@ -56,9 +56,18 @@ const ( // ErrCodePackedPolicyTooLargeException for service response error code // "PackedPolicyTooLarge". // - // The request was rejected because the policy document was too large. The error - // message describes how big the policy document is, in packed form, as a percentage - // of what the API allows. + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. 
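+	//
+	// As a sketch of how a caller might detect this condition, assuming the
+	// usual awserr type-assertion pattern used with this SDK:
+	//
+	//    if aerr, ok := err.(awserr.Error); ok &&
+	//        aerr.Code() == sts.ErrCodePackedPolicyTooLargeException {
+	//        // Reduce the number or size of session policies and tags, then retry.
+	//    }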
ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" // ErrCodeRegionDisabledException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d1..d34a68553 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "sts" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifer of a specific service. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the STS client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a STS client from just a session. // svc := sts.New(mySession) // @@ -46,11 +48,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml index 1f9807757..730c7fa51 100644 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -3,7 +3,15 @@ language: go sudo: false go: - - 1.4 + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x install: go get -v -t ./... script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md index 187ef676d..110ad7999 100644 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -4,4 +4,84 @@ -See http://jmespath.org for more info. +go-jmespath is a GO implementation of JMESPath, +which is a query language for JSON. It will take a JSON +document and transform it into another JSON document +through a JMESPath expression. + +Using go-jmespath is really easy. 
There's a single function
+you use, `jmespath.Search`:
+
+
+```go
+> import "github.com/jmespath/go-jmespath"
+>
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar.baz[2]", data)
+result = 2
+```
+
+In the example we gave the `Search` function input data of
+`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath
+expression `foo.bar.baz[2]`, and the `Search` function evaluated
+the expression against the input data to produce the result `2`.
+
+The JMESPath language can do a lot more than select an element
+from a list. Here are a few more examples:
+
+```go
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar", data)
+result = { "baz": [ 0, 1, 2, 3, 4 ] }
+
+
+> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"},
+                                  {"first": "c", "last": "d"}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[*].first", data)
+result = [ 'a', 'c' ]
+
+
+> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25},
+                                  {"age": 30}, {"age": 35},
+                                  {"age": 40}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[?age > `30`]", data)
+result = [ { age: 35 }, { age: 40 } ]
+```
+
+You can also pre-compile your query. This is useful if
+you are going to run multiple searches with it:
+
+```go
+    > var jsondata = []byte(`{"foo": "bar"}`)
+    > var data interface{}
+    > err := json.Unmarshal(jsondata, &data)
+    > precompiled, err := jmespath.Compile("foo")
+    > if err != nil {
+    >     // ... handle the error
+    > }
+    > result, err := precompiled.Search(data)
+    result = "bar"
+```
+
+## More Resources
+
+The examples above show only a small amount of what
+a JMESPath expression can do. If you want to take a
+tour of the language, the *best* place to go is the
+[JMESPath Tutorial](http://jmespath.org/tutorial.html).
+
+One of the best things about JMESPath is that it is
+implemented in many different programming languages, including
+Python, Ruby, PHP, Lua, and others. To see a complete list of libraries,
+check out the [JMESPath libraries page](http://jmespath.org/libraries.html).
+
+And finally, the full JMESPath specification can be found
+on the [JMESPath site](http://jmespath.org/specification.html).
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
index 8e26ffeec..010efe9bf 100644
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -2,7 +2,7 @@ package jmespath
 
 import "strconv"
 
-// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
 // safe for concurrent use by multiple goroutines.
type JMESPath struct { ast ASTNode diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod new file mode 100644 index 000000000..aa1e3f1c9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.mod @@ -0,0 +1,5 @@ +module github.com/jmespath/go-jmespath + +go 1.14 + +require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum new file mode 100644 index 000000000..331fa6982 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index 1240a1755..4abc303ab 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -137,7 +137,7 @@ func (p *Parser) Parse(expression string) (ASTNode, error) { } if p.current() != tEOF { return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expresssion: %s", p.current())) + "Unexpected token at the end of the expression: %s", p.current())) } return parsed, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 63674db3e..1d9d3d202 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -25,8 +25,9 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.25.3 +# github.com/aws/aws-sdk-go v1.32.1 github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client @@ -44,6 +45,7 @@ github.com/aws/aws-sdk-go/aws/endpoints github.com/aws/aws-sdk-go/aws/request github.com/aws/aws-sdk-go/aws/session github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/context github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/internal/sdkio @@ -51,6 +53,9 @@ github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/internal/sdkrand github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/internal/strings +github.com/aws/aws-sdk-go/internal/sync/singleflight +github.com/aws/aws-sdk-go/private/checksum github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi 
@@ -61,6 +66,7 @@ github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/s3/internal/arn github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d @@ -197,7 +203,7 @@ github.com/hashicorp/terraform-svchost/auth github.com/hashicorp/terraform-svchost/disco # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux -# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af +# github.com/jmespath/go-jmespath v0.3.0 github.com/jmespath/go-jmespath # github.com/joho/godotenv v1.3.0 github.com/joho/godotenv